| signature | implementation |
|---|---|
def dfs_recursive(graph, node, seen):
    """Depth-first search marking the connected component of *node*.

    :param graph: directed graph in list-of-lists or list-of-dicts format
    :param int node: vertex at which exploration starts
    :param seen: boolean table; entries of the component containing
        ``node`` are set to True in place
    :complexity: `O(|V| + |E|)`
    """
    seen[node] = True
    unvisited = (succ for succ in graph[node] if not seen[succ])
    for succ in unvisited:
        dfs_recursive(graph, succ, seen)
def flds_sort(d, s):
    '''Sort flds/sclr data arrays based on position.

    Parameters:
      d -- the flds/sclr data dict; every key other than the
           bookkeeping keys ('t', 'xs', 'ys', 'zs', 'fd', 'sd')
           is reordered and reshaped in place.
      s -- (si, shape): sort indices and shape from the first sort.

    Returns the same dict, mutated in place.
    '''
    si, shape = s
    data_keys = [k for k in d if k not in ('t', 'xs', 'ys', 'zs', 'fd', 'sd')]
    for k in data_keys:
        d[k] = np.squeeze(d[k][si].reshape(shape))
    return d
def send_message(self, *args, **kwargs):
    '''Wrapped method would accept new `queued` and `isgroup`
    OPTIONAL arguments.

    Delegates straight to the parent class implementation; the extra
    keyword arguments mentioned above are presumably consumed by a
    decorator applied to this method — NOTE(review): that decorator is
    not visible here, confirm at the class definition.
    '''
    return super(MQBot, self).send_message(*args, **kwargs)
def hex_to_name(hex_value, spec=u'css3'):
    """Convert a hexadecimal color value to its normalized color name.

    The optional keyword argument ``spec`` selects which specification's
    list of color names is used; valid values are ``html4``, ``css2``,
    ``css21`` and ``css3`` (the default).

    Raises ``ValueError`` for an unsupported spec, or when the given
    specification defines no name for the value.
    """
    if spec not in SUPPORTED_SPECIFICATIONS:
        raise ValueError(SPECIFICATION_ERROR_TEMPLATE.format(spec=spec))
    spec_tables = {
        u'css2': CSS2_HEX_TO_NAMES,
        u'css21': CSS21_HEX_TO_NAMES,
        u'css3': CSS3_HEX_TO_NAMES,
        u'html4': HTML4_HEX_TO_NAMES,
    }
    name = spec_tables[spec].get(normalize_hex(hex_value))
    if name is None:
        raise ValueError(u"'{}' has no defined color name in {}".format(hex_value, spec))
    return name
def add_phrases(self, corpus):
    '''Learn multi-token phrases from ``corpus`` using gensim.

    Parameters
    ----------
    corpus : ParsedCorpus
        Corpus for phrase augmentation.

    Returns
    -------
    self
        NOTE(review): the original docstring claimed a "New ParsedCorpus"
        is returned, but the code returns ``self`` (fluent style); the
        trained phrase models are stored on ``self.phrases`` — confirm
        intended contract against callers.
    '''
    from gensim.models import Phrases
    assert isinstance(corpus, ParsedCorpus)
    # First-order phrase model over the raw sentence stream.
    self.phrases = [Phrases(CorpusAdapterForGensim.get_sentences(corpus), delimiter=' ')]
    # Each subsequent model is trained on the previous model's output,
    # growing phrases up to max_tokens_per_phrase tokens.
    for i in range(1, self.max_tokens_per_phrase):
        self.phrases.append(Phrases(self.phrases[-1][CorpusAdapterForGensim.get_sentences(corpus)]))
    return self
def loadSVrecs(fname, uselines=None, skiprows=0, linefixer=None, delimiter_regex=None, verbosity=DEFAULT_VERBOSITY, **metadata):
    """Load a separated-value text file into a list of record lists.

    Reads a delimited text file and returns its rows as lists of
    strings, together with the metadata (column names, csv dialect,
    header info, ...) determined during loading.

    **Parameters**

        **fname** : string or file object
            Path to (or open handle of) the delimited text file.

        **uselines** : pair of non-negative integers, optional
            Range of data (non-header) lines to load, applied after
            ``skiprows`` and the header lines have been consumed.

        **skiprows** : non-negative integer, optional
            Number of initial file lines ignored before header
            processing. Default 0.

        **linefixer** : callable, optional
            Applied to every raw line (after splitting the file into
            lines, before splitting lines into fields) to repair
            malformed input prior to delimiter inference.

        **delimiter_regex** : compiled regex or string, optional
            Pattern recognizing delimiters in place of a single
            character (e.g. ``'[\\s*]+'`` for whitespace delimiting);
            matches are rewritten to the inferred dialect delimiter.

        **metadata** : additional keyword hints
            ``delimiter``, ``lineterminator``, ``skipinitialspace``,
            ``comments``, ``headerlines``, ``namesinheader``,
            ``inflines``, ``metametadata``, plus csv-module options
            (``quotechar``, ``escapechar``, ``quoting``, ``doublequote``,
            ``dialect``); see :func:`tabular.io.loadSV` for details.

    **Returns**

        ``[records, metadata]`` — ``records`` is a list of lists of
        strings (one inner list per row); ``metadata`` is the dictionary
        read and constructed while loading.

    NOTE(review): this is Python 2 code (``types.StringType``,
    ``file()``, ``'rU'`` mode, side-effecting ``map``) and will not run
    under Python 3 without porting.

    **See Also:** :func:`tabular.io.loadSV`, :func:`tabular.io.saveSV`,
    :func:`tabular.io.DEFAULT_TYPEINFERER`
    """
    if delimiter_regex and isinstance(delimiter_regex, types.StringType):
        import re
        delimiter_regex = re.compile(delimiter_regex)
    # Infer dialect/header metadata (plus sample lines) before reading.
    [metadata, inferedlines, WHOLETHING] = getmetadata(fname, skiprows=skiprows, linefixer=linefixer, delimiter_regex=delimiter_regex, verbosity=verbosity, **metadata)
    if uselines is None:
        uselines = (0, False)
    if is_string_like(fname):
        fh = file(fname, 'rU')
    elif hasattr(fname, 'readline'):
        fh = fname
    else:
        raise ValueError('fname must be a string or file handle')
    # Consume skipped rows, the requested line offset, and the header.
    for _ind in range(skiprows + uselines[0] + metadata['headerlines']):
        fh.readline()
    if linefixer or delimiter_regex:
        # Rewrite the remaining lines into a temp file so csv.reader
        # sees repaired lines with a single-character delimiter.
        fh2 = tempfile.TemporaryFile('w+b')
        F = fh.read().strip('\n').split('\n')
        if linefixer:
            F = map(linefixer, F)
        if delimiter_regex:
            F = map(lambda line: delimiter_regex.sub(metadata['dialect'].delimiter, line), F)
        fh2.write('\n'.join(F))
        fh2.seek(0)
        fh = fh2
    reader = csv.reader(fh, dialect=metadata['dialect'])
    if uselines[1]:
        # Bounded read: stop once the requested line range is exhausted.
        linelist = []
        for ln in reader:
            if reader.line_num <= uselines[1] - uselines[0]:
                linelist.append(ln)
            else:
                break
    else:
        linelist = list(reader)
    fh.close()
    # Drop the trailing empty record produced by a final newline.
    if linelist[-1] == []:
        linelist.pop(-1)
    return [linelist, metadata]
def ssh(lancet, print_cmd, environment):
    """SSH into the given environment, based on the dploi configuration.

    Reads the deployment spec file configured under ``dploi ->
    deployment_spec``, executes it to obtain its ``settings`` dict,
    and either prints the resulting ssh command (``print_cmd``) or
    hands it to the shell.
    """
    namespace = {}
    with open(lancet.config.get('dploi', 'deployment_spec')) as fh:
        code = compile(fh.read(), 'deployment.py', 'exec')
        # SECURITY: executes arbitrary code from the deployment spec
        # file; acceptable only because that file is developer-owned.
        exec(code, {}, namespace)
    config = namespace['settings'][environment]
    host = '{}@{}'.format(config['user'], config['hosts'][0])
    # Default to port 22 when the spec does not override it.
    cmd = ['ssh', '-p', str(config.get('port', 22)), host]
    if print_cmd:
        click.echo(' '.join(quote(s) for s in cmd))
    else:
        lancet.defer_to_shell(*cmd)
def consonants(self):
    """Return a new IPAString containing only this string's consonants.

    :rtype: IPAString
    """
    consonant_chars = [ch for ch in self.ipa_chars if ch.is_consonant]
    return IPAString(ipa_chars=consonant_chars)
def serve():
    """Main entry point: start the crossdock Tornado/TChannel server.

    Configures DEBUG logging, binds the TChannel server plus both the
    client- and server-side HTTP ports, then blocks on the IOLoop
    forever.
    """
    logging.getLogger().setLevel(logging.DEBUG)
    logging.info('Python Tornado Crossdock Server Running ...')
    server = Server(DefaultServerPortTChannel)
    endtoend_handler = EndToEndHandler()
    app = make_app(server, endtoend_handler)
    # The same Tornado app answers on both HTTP ports.
    app.listen(DefaultClientPortHTTP)
    app.listen(DefaultServerPortHTTP)
    server.tchannel.listen()
    # Blocks until the IOLoop is stopped externally.
    tornado.ioloop.IOLoop.current().start()
def check_lazy_load_terreinobject(f):
    '''Decorator function to lazy load a :class:`Terreinobject`.

    Before invoking ``f``, ensures the Terreinobject passed as the
    first argument has its ``_centroid``, ``_bounding_box`` and
    ``_metadata`` attributes populated, fetching the full object from
    the gateway when any of them is still ``None``.

    FIX: the wrapper now carries ``functools.wraps(f)`` so the
    decorated function keeps its name/docstring for introspection.
    '''
    from functools import wraps

    @wraps(f)
    def wrapper(*args):
        terreinobject = args[0]
        if (terreinobject._centroid is None
                or terreinobject._bounding_box is None
                or terreinobject._metadata is None):
            log.debug('Lazy loading Terreinobject %s', terreinobject.id)
            terreinobject.check_gateway()
            t = terreinobject.gateway.get_terreinobject_by_id(terreinobject.id)
            terreinobject._centroid = t._centroid
            terreinobject._bounding_box = t._bounding_box
            terreinobject._metadata = t._metadata
        return f(*args)
    return wrapper
def find_proc_date(header):
    """Search the HISTORY cards of a FITS header for the FLIPS
    processing date.

    Args:
        header: pyfits-style header exposing ``ascardlist()``.

    Returns:
        str or None: ``"<date> <time>"`` extracted from the
        ``imred: FLIPS 1.0`` HISTORY entry, or ``None`` when no such
        entry is found.
    """
    import re
    # BUG FIX: the original guarded with `if string.find(g, ...)`, but
    # str.find() returns -1 (truthy!) when the marker is absent, so the
    # guard only ever skipped a match at offset 0. The regex below is
    # the real test, so the broken guard is removed outright; this also
    # drops the Python-2-only `string` module.
    pattern = re.compile(r'imred: FLIPS 1.0 - \S{3} (.*) - ([\s\d]\d:\d\d:\d\d)\s*$')
    for card in header.ascardlist():
        if card.key != "HISTORY":
            continue
        result = pattern.search(card.value)
        if result:
            return result.group(1) + " " + result.group(2)
    return None
def get(self, ph_type, default=None):
    """Return the first placeholder shape with type *ph_type* (e.g.
    'body'), or *default* if no such placeholder shape is present in
    the collection."""
    matches = (ph for ph in self if ph.ph_type == ph_type)
    return next(matches, default)
def TryRun(self, text, extension):
    """Compile, link and run the program given in ``text``, using
    ``extension`` as file extension (e.g. '.c').

    Returns (1, outputStr) on success, (0, '') otherwise. The target
    (a file containing the program's stdout) is saved in
    ``self.lastTarget`` for further processing.
    """
    ok = self.TryLink(text, extension)
    if (ok):
        prog = self.lastTarget
        pname = prog.get_internal_path()
        # Redirect the program's stdout into '<prog>.out' in confdir.
        output = self.confdir.File(os.path.basename(pname) + '.out')
        node = self.env.Command(output, prog, [[pname, ">", "${TARGET}"]])
        ok = self.BuildNodes(node)
        if ok:
            outputStr = SCons.Util.to_str(output.get_contents())
            return (1, outputStr)
    return (0, "")
def s_get(self, quant):
    """Return a number read as ``quant`` signed bits (two's complement:
    one sign bit followed by ``quant - 1`` magnitude bits)."""
    if quant < 2:
        # 0 or 1 bit leaves no room for a sign bit; treat as unsigned.
        return self.u_get(quant)
    sign_bit = self.u_get(1)
    magnitude = self.u_get(quant - 1)
    if not sign_bit:
        # Positive: the magnitude is the value.
        return magnitude
    # Negative: undo the two's-complement encoding.
    mask = 2 ** (quant - 1) - 1
    return -((magnitude ^ mask) + 1)
def Parse(self, raw_data):
    """Take the data and yield results that passed through the filters.

    The output of each filter is added to a result set. So long as the
    filter selects, but does not modify, raw data, the result count
    will remain accurate.

    Args:
      raw_data: An iterable series of rdf values.

    Returns:
      A list of rdf values that matched at least one filter (or all of
      raw_data when no filters are configured).
    """
    self.results = set()
    if self.filters:
        for flt in self.filters:
            self.results.update(flt.Parse(raw_data))
    else:
        # No filters configured: everything passes through.
        self.results.update(raw_data)
    return list(self.results)
def add_row(self, label='', item=''):
    """Append one row to the grid, labelling it and recording its
    label/item in the parallel bookkeeping lists."""
    self.AppendRows(1)
    row_index = self.GetNumberRows() - 1
    self.SetCellValue(row_index, 0, str(label))
    for bucket, value in ((self.row_labels, label), (self.row_items, item)):
        bucket.append(value)
def result(self, timeout=None):
    """Waits up to ``timeout`` for the result of the threaded job.

    Returns immediately if the job has already been done.

    :param timeout: Maximum time to wait for a result (in seconds)
    :raise OSError: The timeout elapsed before the job finished
    :raise Exception: Re-raises the exception raised by the job
    """
    # wait() already returns the flag state; the extra is_set() check
    # is kept for parity with the original behavior.
    finished = self._done_event.wait(timeout) or self._done_event.is_set()
    if not finished:
        raise OSError("Timeout raised")
    if self._exception is not None:
        raise self._exception
    return self._result
def set_lock(key, value=None, expiry_time=60):
    """Force to set a distributed lock.

    :param key: redis key holding the lock
    :param value: lock token; a fresh UUID when not given
    :param expiry_time: TTL of the lock in seconds (default 60)
    :return: redis SET reply (True on success, None otherwise)

    NOTE(review): ``xx=True`` means "set only if the key already
    exists" — acquiring a fresh lock would normally use ``nx=True``.
    The docstring's "force" suggests overwriting an existing lock is
    intended; confirm against callers.
    """
    from uliweb.utils.common import get_uuid
    redis = get_redis()
    value = value or get_uuid()
    return redis.set(key, value, ex=expiry_time, xx=True)
def _MakeRanges(pairs):
    """Compress a list of (lo, hi) case-mapping pairs into delta ranges.

    Turns a list like [(65, 97), (66, 98), ..., (90, 122)] into
    [[65, 90, delta]], where ``delta`` (produced by ``_Delta`` and
    applied by ``_AddDelta``) describes how ``hi`` is derived from
    ``lo`` — possibly a string delta carrying a 'Skip' marker for
    alternating (even/odd) ranges.
    """
    ranges = []
    last = -100  # sentinel: no real pair can be adjacent to this

    def evenodd(last, a, b, r):
        # Extend range r when (a, b) directly continues it (step of 1)
        # with the same delta; mutates r[1] (the range's upper bound).
        if a != last + 1 or b != _AddDelta(a, r[2]):
            return False
        r[1] = a
        return True

    def evenoddpair(last, a, b, r):
        # Extend r when pairs advance by 2, toggling the 'Skip' suffix
        # on the (string) delta; mutates r[1] and r[2].
        if a != last + 2:
            return False
        delta = r[2]
        d = delta
        if type(delta) is not str:
            return False
        if delta.endswith('Skip'):
            d = delta[:-4]
        else:
            delta = d + 'Skip'
        if b != _AddDelta(a, d):
            return False
        r[1] = a
        r[2] = delta
        return True

    for a, b in pairs:
        if ranges and evenodd(last, a, b, ranges[-1]):
            pass
        elif ranges and evenoddpair(last, a, b, ranges[-1]):
            pass
        else:
            # Start a brand-new single-element range.
            ranges.append([a, a, _Delta(a, b)])
        last = a
    return ranges
def blend_mode(self):
    """BlendMode: The blend mode used for drawing operations.

    Queries SDL via ``SDL_GetRenderDrawBlendMode``; ``check_int_err``
    raises when the underlying call reports an error.
    """
    blend_mode_ptr = ffi.new('int *')  # out-parameter for the C call
    check_int_err(lib.SDL_GetRenderDrawBlendMode(self._ptr, blend_mode_ptr))
    return BlendMode(blend_mode_ptr[0])
def getSpec(cls):
    """Return the network-API Spec for ApicalTMSequenceRegion.

    The spec is a plain nested dict describing the region's inputs,
    outputs and parameters (names, dtypes, counts, access modes).
    It is kept as one literal so the declaration stays self-contained.
    """
    spec = { "description" : ApicalTMSequenceRegion . __doc__ , "singleNodeOnly" : True , "inputs" : { "activeColumns" : { "description" : ( "An array of 0's and 1's representing the active " "minicolumns, i.e. the input to the TemporalMemory" ) , "dataType" : "Real32" , "count" : 0 , "required" : True , "regionLevel" : True , "isDefaultInput" : True , "requireSplitterMap" : False } , "resetIn" : { "description" : ( "A boolean flag that indicates whether" " or not the input vector received in this compute cycle" " represents the first presentation in a" " new temporal sequence." ) , "dataType" : "Real32" , "count" : 1 , "required" : False , "regionLevel" : True , "isDefaultInput" : False , "requireSplitterMap" : False } , "apicalInput" : { "description" : "An array of 0's and 1's representing top down input." " The input will be provided to apical dendrites." , "dataType" : "Real32" , "count" : 0 , "required" : False , "regionLevel" : True , "isDefaultInput" : False , "requireSplitterMap" : False } , "apicalGrowthCandidates" : { "description" : ( "An array of 0's and 1's representing apical input " "that can be learned on new synapses on apical " "segments. If this input is a length-0 array, the " "whole apicalInput is used." ) , "dataType" : "Real32" , "count" : 0 , "required" : False , "regionLevel" : True , "isDefaultInput" : False , "requireSplitterMap" : False } , } , "outputs" : { "nextPredictedCells" : { "description" : ( "A binary output containing a 1 for every " "cell that is predicted for the next timestep." ) , "dataType" : "Real32" , "count" : 0 , "regionLevel" : True , "isDefaultOutput" : False } , "predictedActiveCells" : { "description" : ( "A binary output containing a 1 for every " "cell that transitioned from predicted to active."
    ) , "dataType" : "Real32" , "count" : 0 , "regionLevel" : True , "isDefaultOutput" : False } , "activeCells" : { "description" : ( "A binary output containing a 1 for every " "cell that is currently active." ) , "dataType" : "Real32" , "count" : 0 , "regionLevel" : True , "isDefaultOutput" : True } , "winnerCells" : { "description" : ( "A binary output containing a 1 for every " "'winner' cell in the TM." ) , "dataType" : "Real32" , "count" : 0 , "regionLevel" : True , "isDefaultOutput" : False } , } , "parameters" : { # Input sizes (the network API doesn't provide these during initialize)
    "columnCount" : { "description" : ( "The size of the 'activeColumns' input " + "(i.e. the number of columns)" ) , "accessMode" : "Read" , "dataType" : "UInt32" , "count" : 1 , "constraints" : "" } , "apicalInputWidth" : { "description" : "The size of the 'apicalInput' input" , "accessMode" : "Read" , "dataType" : "UInt32" , "count" : 1 , "constraints" : "" } , "learn" : { "description" : "True if the TM should learn." , "accessMode" : "ReadWrite" , "dataType" : "Bool" , "count" : 1 , "defaultValue" : "true" } , "cellsPerColumn" : { "description" : "Number of cells per column" , "accessMode" : "Read" , "dataType" : "UInt32" , "count" : 1 , "constraints" : "" } , "activationThreshold" : { "description" : ( "If the number of active connected synapses on a " "segment is at least this threshold, the segment " "is said to be active." ) , "accessMode" : "Read" , "dataType" : "UInt32" , "count" : 1 , "constraints" : "" } , "reducedBasalThreshold" : { "description" : ( "Activation threshold of basal segments for cells " "with active apical segments (with apicalTiebreak " "implementation). " ) , "accessMode" : "Read" , "dataType" : "UInt32" , "count" : 1 , "constraints" : "" } , "initialPermanence" : { "description" : "Initial permanence of a new synapse." , "accessMode" : "Read" , "dataType" : "Real32" , "count" : 1 , "constraints" : "" } , "connectedPermanence" : { "description" : ( "If the permanence value for a synapse is greater " "than this value, it is said to be connected." ) , "accessMode" : "Read" , "dataType" : "Real32" , "count" : 1 , "constraints" : "" } , "minThreshold" : { "description" : ( "If the number of synapses active on a segment is at " "least this threshold, it is selected as the best " "matching cell in a bursting column."
    ) , "accessMode" : "Read" , "dataType" : "UInt32" , "count" : 1 , "constraints" : "" } , "sampleSize" : { "description" : ( "The desired number of active synapses for an " + "active cell" ) , "accessMode" : "Read" , "dataType" : "UInt32" , "count" : 1 } , "learnOnOneCell" : { "description" : ( "If True, the winner cell for each column will be" " fixed between resets." ) , "accessMode" : "Read" , "dataType" : "Bool" , "count" : 1 , "defaultValue" : "false" } , "maxSynapsesPerSegment" : { "description" : "The maximum number of synapses per segment" , "accessMode" : "Read" , "dataType" : "UInt32" , "count" : 1 } , "maxSegmentsPerCell" : { "description" : "The maximum number of segments per cell" , "accessMode" : "Read" , "dataType" : "UInt32" , "count" : 1 } , "permanenceIncrement" : { "description" : ( "Amount by which permanences of synapses are " "incremented during learning." ) , "accessMode" : "Read" , "dataType" : "Real32" , "count" : 1 } , "permanenceDecrement" : { "description" : ( "Amount by which permanences of synapses are " "decremented during learning." ) , "accessMode" : "Read" , "dataType" : "Real32" , "count" : 1 } , "basalPredictedSegmentDecrement" : { "description" : ( "Amount by which active permanences of synapses of " "previously predicted but inactive segments are " "decremented." ) , "accessMode" : "Read" , "dataType" : "Real32" , "count" : 1 } , "apicalPredictedSegmentDecrement" : { "description" : ( "Amount by which active permanences of synapses of " "previously predicted but inactive segments are " "decremented." ) , "accessMode" : "Read" , "dataType" : "Real32" , "count" : 1 } , "seed" : { "description" : "Seed for the random number generator."
    , "accessMode" : "Read" , "dataType" : "UInt32" , "count" : 1 } , "implementation" : { "description" : "Apical implementation" , "accessMode" : "Read" , "dataType" : "Byte" , "count" : 0 , "constraints" : ( "enum: ApicalTiebreak, ApicalTiebreakCPP, ApicalDependent" ) , "defaultValue" : "ApicalTiebreakCPP" } , } , }
    return spec
def _param_fields ( kwargs , fields ) :
"""Normalize the " fields " argument to most find methods""" | if fields is None :
return
if type ( fields ) in [ list , set , frozenset , tuple ] :
fields = { x : True for x in fields }
if type ( fields ) == dict :
fields . setdefault ( "_id" , False )
kwargs [ "projection" ] = fields |
def verify_data(self, data):
    '''Verify a low-chunk state ``data`` dict; return a list of
    human-readable error strings (empty when the chunk is valid).'''
    errors = []
    # Structural checks: every chunk needs state, fun and name keys.
    if 'state' not in data:
        errors.append('Missing "state" data')
    if 'fun' not in data:
        errors.append('Missing "fun" data')
    if 'name' not in data:
        errors.append('Missing "name" data')
    if data['name'] and not isinstance(data['name'], six.string_types):
        errors.append('ID \'{0}\' {1}is not formed as a string, but is a {2}'.format(data['name'], 'in SLS \'{0}\' '.format(data['__sls__']) if '__sls__' in data else '', type(data['name']).__name__))
    if errors:
        # Structural problems make the checks below unsafe; stop here.
        return errors
    full = data['state'] + '.' + data['fun']
    if full not in self.states:
        if '__sls__' in data:
            errors.append('State \'{0}\' was not found in SLS \'{1}\''.format(full, data['__sls__']))
            reason = self.states.missing_fun_string(full)
            if reason:
                errors.append('Reason: {0}'.format(reason))
        else:
            errors.append('Specified state \'{0}\' was not found'.format(full))
    else:
        # First verify that the parameters are met
        aspec = salt.utils.args.get_function_argspec(self.states[full])
        arglen = 0
        deflen = 0
        if isinstance(aspec.args, list):
            arglen = len(aspec.args)
        if isinstance(aspec.defaults, tuple):
            deflen = len(aspec.defaults)
        # Every argument without a default must be present in the chunk.
        for ind in range(arglen - deflen):
            if aspec.args[ind] not in data:
                errors.append('Missing parameter {0} for state {1}'.format(aspec.args[ind], full))
    # If this chunk has a recursive require, then it will cause a
    # recursive loop when executing, check for it
    reqdec = ''
    if 'require' in data:
        reqdec = 'require'
    if 'watch' in data:
        # Check to see if the service has a mod_watch function, if it
        # does not, then just require: extend the require statement with
        # the contents of watch so that the mod_watch function is not
        # called and the requisite capability is still used.
        if '{0}.mod_watch'.format(data['state']) not in self.states:
            if 'require' in data:
                data['require'].extend(data.pop('watch'))
            else:
                data['require'] = data.pop('watch')
            reqdec = 'require'
        else:
            reqdec = 'watch'
    if reqdec:
        for req in data[reqdec]:
            reqfirst = next(iter(req))
            if data['state'] == reqfirst:
                # Same state type requiring an ID/name that glob-matches
                # this chunk's own => recursive requisite.
                if (fnmatch.fnmatch(data['name'], req[reqfirst]) or fnmatch.fnmatch(data['__id__'], req[reqfirst])):
                    err = ('Recursive require detected in SLS {0} for' ' require {1} in ID {2}').format(data['__sls__'], req, data['__id__'])
                    errors.append(err)
    return errors
def get_task_ops(task_type=TaskType.ALG_CTRL):
    """Returns an operations list based on the specified task index.

    Args:
      task_type: indicates the task type used.
    Returns:
      List of the eligible ops.
    Raises:
      KeyError: when no ops are registered for ``task_type``.
    """
    ops_by_task = LearnToExecuteState.TASK_TYPE_OPS
    if task_type not in ops_by_task:
        raise KeyError("Bad task_type '%s', check config." % task_type)
    return ops_by_task[task_type]
def create_run_config(hp, output_dir=None):
    """Create a run config from global FLAGS.

    Args:
      hp: model hyperparameters.
      output_dir: model's output directory; defaults to --output_dir.

    Returns:
      a run config (built by ``trainer_lib.create_run_config``).
    """
    save_ckpt_steps = max(FLAGS.iterations_per_loop, FLAGS.local_eval_frequency)
    save_ckpt_secs = FLAGS.save_checkpoints_secs or None
    if save_ckpt_secs:
        # Time-based checkpointing wins over step-based when both are set.
        save_ckpt_steps = None
    assert FLAGS.output_dir or FLAGS.checkpoint_path
    tpu_config_extra_kwargs = {}
    if FLAGS.tpu_job_name is not None:
        tpu_config_extra_kwargs["tpu_job_name"] = FLAGS.tpu_job_name
    if getattr(hp, "mtf_mode", False):
        # Disable the default saver entirely in mesh-tensorflow mode.
        save_ckpt_steps = None
        save_ckpt_secs = None
        tpu_config_extra_kwargs = { "num_cores_per_replica" : 1 , "per_host_input_for_training" : tpu_config . InputPipelineConfig . BROADCAST , }
    # Daisy-chaining only works for pure-float32 models:
    # the various custom getters we have written do not play well together yet.
    # TODO(noam): ask rsepassi for help here.
    daisy_chain_variables = (hp.daisy_chain_variables and hp.activation_dtype == "float32" and hp.weight_dtype == "float32")
    return trainer_lib . create_run_config ( model_name = FLAGS . model , model_dir = output_dir or os . path . expanduser ( FLAGS . output_dir ) , master = FLAGS . master , iterations_per_loop = FLAGS . iterations_per_loop , num_shards = FLAGS . tpu_num_shards , log_device_placement = FLAGS . log_device_placement , save_checkpoints_steps = save_ckpt_steps , save_checkpoints_secs = save_ckpt_secs , keep_checkpoint_max = FLAGS . keep_checkpoint_max , keep_checkpoint_every_n_hours = FLAGS . keep_checkpoint_every_n_hours , num_gpus = FLAGS . worker_gpu , gpu_order = FLAGS . gpu_order , num_async_replicas = FLAGS . worker_replicas , gpu_mem_fraction = FLAGS . worker_gpu_memory_fraction , enable_graph_rewriter = FLAGS . enable_graph_rewriter , use_tpu = FLAGS . use_tpu , use_tpu_estimator = FLAGS . use_tpu_estimator , xla_jit_level = FLAGS . xla_jit_level , schedule = FLAGS . schedule , no_data_parallelism = hp . no_data_parallelism , optionally_use_dist_strat = FLAGS . optionally_use_dist_strat , daisy_chain_variables = daisy_chain_variables , ps_replicas = FLAGS . ps_replicas , ps_job = FLAGS . ps_job , ps_gpu = FLAGS . ps_gpu , sync = FLAGS . sync , worker_id = FLAGS . worker_id , worker_job = FLAGS . worker_job , random_seed = FLAGS . random_seed , tpu_infeed_sleep_secs = FLAGS . tpu_infeed_sleep_secs , inter_op_parallelism_threads = FLAGS . inter_op_parallelism_threads , log_step_count_steps = FLAGS . log_step_count_steps , intra_op_parallelism_threads = FLAGS . intra_op_parallelism_threads , tpu_config_extra_kwargs = tpu_config_extra_kwargs , cloud_tpu_name = FLAGS . cloud_tpu_name )
def _repr_html_ ( self ) :
"""Build the HTML representation for IPython .""" | self . chart_id = '_' . join ( [ 'bearcart' , uuid4 ( ) . hex ] )
self . template_vars . update ( { 'chart_id' : self . chart_id , 'y_axis_id' : self . y_axis_id , 'legend_id' : self . legend_id , 'slider_id' : self . slider_id , 'export_json' : json . dumps ( self . json_data ) } )
self . _build_graph ( )
html = self . env . get_template ( 'ipynb_repr.html' )
return html . render ( self . template_vars ) |
def do_filter(config, config_dir):
    """CLI action "run editor for filters list".

    Opens $EDITOR on the 'filter' file inside ``config_dir``; exits the
    process with status 1 when the configuration directory is missing.

    NOTE(review): Python 2 print statements — this module is py2-only.
    Also raises KeyError when EDITOR is unset; confirm acceptable.
    """
    if not os.path.exists(config_dir):
        print "Configuration '{}' does not exist.".format(config)
        exit(1)
    editor = os.environ["EDITOR"]
    config_filter = os.path.join(config_dir, 'filter')
    # Blocks until the user's editor exits.
    call([editor, config_filter])
    print "Filter configuration has been updated."
def send_voice(self, *args, **kwargs):
    """See :func:`send_voice`.

    Proxy for the module-level function of the same name, merging this
    instance's default overrides into ``kwargs`` before running.
    """
    return send_voice(*args, **self._merge_overrides(**kwargs)).run()
def schedule_hosting_device(self, plugin, context, hosting_device):
    """Selects Cisco cfg agent that will configure <hosting_device>.

    Returns a randomly chosen active cfg agent, or None when no agent
    is currently active (scheduling is retried when one appears).
    """
    active_cfg_agents = plugin.get_cfg_agents(context, active=True)
    if not active_cfg_agents:
        LOG.warning('There are no active Cisco cfg agents')
        # No worries, once a Cisco cfg agent is started and
        # announces itself any "dangling" hosting devices
        # will be scheduled to it.
        return
    # FIX: use lazy logger argument formatting instead of eager '%'
    # so the string is only built when debug logging is enabled.
    LOG.debug('Randomly selecting a Cisco cfg agent among %d candidates',
              len(active_cfg_agents))
    return random.choice(active_cfg_agents)
def GetSecurityDescriptor(self):
    """Retrieves the security descriptor.

    Returns:
      pyfwnt.security_descriptor: descriptor parsed from the NTFS file
      entry's raw security descriptor byte stream.
    """
    fwnt_security_descriptor = pyfwnt.security_descriptor()
    fwnt_security_descriptor.copy_from_byte_stream(self._fsntfs_file_entry.security_descriptor_data)
    return fwnt_security_descriptor
def windowed_iterable(self):
    """Yield only the items that fall inside the current view window
    (offset ``iterable_index``, length ``iterable_fetch_size``)."""
    start = max(0, self.item_view.iterable_index)
    stop = start + self.item_view.iterable_fetch_size
    for index, item in enumerate(self.iterable):
        if index >= stop:
            break
        if index >= start:
            yield item
def do_save(self, fp=None):
    """Save to file.

    Parameters
    ----------
    fp : `file` or `str`, optional
        Output file (default: stdout).
    """
    target = sys.stdout if fp is None else fp
    payload = {
        'settings': self.settings,
        'nodes': self.nodes,
        'backward': self.backward,
    }
    json.dump(payload, target, ensure_ascii=False)
def simulate(self, ts_length=100, random_state=None):
    r"""Simulate the linear state-space model for ``ts_length`` periods,
    first drawing

    .. math::

        x_0 \sim N(\mu_0, \Sigma_0)

    Parameters
    ----------
    ts_length : scalar(int), optional(default=100)
        The length of the simulation.
    random_state : int or np.random.RandomState, optional
        Random seed (integer) or np.random.RandomState instance to set
        the initial state of the random number generator for
        reproducibility. If None, a randomly initialized RandomState
        is used.

    Returns
    -------
    x : array_like(float)
        An n x ts_length array; the t-th column is :math:`x_t`.
    y : array_like(float)
        A k x ts_length array; the t-th column is :math:`y_t`.

    NOTE(review): ``x0`` is drawn via ``multivariate_normal`` without
    passing ``random_state``, so the initial draw is not controlled by
    the seed — confirm whether that is intended.
    """
    random_state = check_random_state(random_state)
    x0 = multivariate_normal(self.mu_0.flatten(), self.Sigma_0)
    w = random_state.randn(self.m, ts_length - 1)
    # Multiply each w_t by C to get v_t = C w_t
    v = self.C.dot(w)
    # == simulate time series == #
    x = simulate_linear_model(self.A, x0, v, ts_length)
    if self.H is not None:
        # Observation noise present: y_t = G x_t + H v_t
        v = random_state.randn(self.l, ts_length)
        y = self.G.dot(x) + self.H.dot(v)
    else:
        y = self.G.dot(x)
    return x, y
def config(commands=None, config_file=None, template_engine='jinja', context=None, defaults=None, saltenv='base', **kwargs):
    '''
    Configures the Nexus switch with the specified commands.

    This method is used to send configuration commands to the switch. It
    will take either a string or a list and prepend the necessary commands
    to put the session into config mode.

    .. warning::

        All the commands will be applied directly to the running-config.

    config_file
        The source file with the configuration commands to be sent to the
        device.
        The file can also be a template that can be rendered using the
        template engine of choice.
        This can be specified using the absolute path to the file, or using
        one of the following URL schemes:

        - ``salt://``, to fetch the file from the Salt fileserver.
        - ``http://`` or ``https://``
        - ``ftp://``
        - ``s3://``
        - ``swift://``

    commands
        The commands to send to the switch in config mode. If the commands
        argument is a string it will be cast to a list.
        The list of commands will also be prepended with the necessary
        commands to put the session in config mode.

        .. note::

            This argument is ignored when ``config_file`` is specified.

    template_engine: ``jinja``
        The template engine to use when rendering the source file. Default:
        ``jinja``. To simply fetch the file without attempting to render,
        set this argument to ``None``.

    context
        Variables to add to the template context.

    defaults
        Default values of the context_dict.

    no_save_config
        If True, don't save configuration commands to startup
        configuration. If False, save configuration to startup
        configuration. Default: False.  (Passed through ``**kwargs`` to the
        device-configuration helper.)

    CLI Example:

    .. code-block:: bash

        salt '*' nxos.config commands="['spanning-tree mode mstp']"
        salt '*' nxos.config config_file=salt://config.txt
        salt '*' nxos.config config_file=https://bit.ly/2LGLcDy context="{'servers': ['1.2.3.4']}"
    '''
    # Snapshot the running-config first so we can produce a diff afterwards.
    initial_config = show('show running-config', **kwargs)
    if isinstance(initial_config, list):
        initial_config = initial_config[0]
    if config_file:
        # Fetch the (possibly templated) source file via the Salt fileserver
        # or any supported URL scheme; cp.get_file_str returns False when the
        # file cannot be found.
        file_str = __salt__['cp.get_file_str'](config_file, saltenv=saltenv)
        if file_str is False:
            raise CommandExecutionError('Source file {} not found'.format(config_file))
    elif commands:
        # Accept a single command as a bare string for convenience.
        if isinstance(commands, (six.string_types, six.text_type)):
            commands = [commands]
        file_str = '\n'.join(commands)
        # unify all the commands in a single file, to render them in a go
    # NOTE(review): if neither config_file nor commands is given, file_str is
    # never bound and the code below raises NameError — confirm callers always
    # pass one of the two.
    if template_engine:
        file_str = __salt__['file.apply_template_on_contents'](file_str, template_engine, context, defaults, saltenv)
    # whatever the source of the commands would be, split them line by line
    commands = [line for line in file_str.splitlines() if line.strip()]
    # Apply the commands and normalize the device reply.
    config_result = _parse_config_result(_configure_device(commands, **kwargs))
    # Re-read the running-config and diff against the initial snapshot.
    current_config = show('show running-config', **kwargs)
    if isinstance(current_config, list):
        current_config = current_config[0]
    # splitlines(1) keeps line endings; [4:] drops the header lines
    # (timestamps etc.) that always differ between the two snapshots.
    diff = difflib.unified_diff(initial_config.splitlines(1)[4:], current_config.splitlines(1)[4:])
    clean_diff = ''.join([x.replace('\r', '') for x in diff])
    head = 'COMMAND_LIST: '
    cc = config_result[0]
    cr = config_result[1]
    # Return: command list header, device response, then the config diff.
    return head + cc + '\n' + cr + '\n' + clean_diff
def get_free_dims(model, visible_dims, fixed_dims):
    """Work out which input dimensions to plot over (1D or 2D).

    The visible dimensions are the dimensions shown in the plot; the
    fixed_dims are held constant.  The free dims are the visible dims
    with the fixed dims (and any ``None`` placeholders) removed.
    """
    if visible_dims is None:
        visible_dims = np.arange(model.input_dim)
    candidates = np.asanyarray(visible_dims)
    if fixed_dims is not None:
        candidates = [d for d in candidates if d not in fixed_dims]
    free = [d for d in candidates if d is not None]
    return np.asanyarray(free)
def ljust(self, width, fillchar=None):
    """S.ljust(width[, fillchar]) -> string

    If a fillchar is provided, less formatting information will be preserved
    """
    if fillchar is not None:
        # explicit fill character: rebuild from the plain string
        return fmtstr(self.s.ljust(width, fillchar), **self.shared_atts)
    padding = ' ' * (width - len(self.s))
    atts = self.shared_atts
    if 'bg' in atts:
        # keep the shared background on the padding as well
        if not padding:
            return self
        return self + fmtstr(padding, bg=atts[str('bg')])
    # no shared background: strip any per-segment bg before padding
    base = self.new_with_atts_removed('bg')
    if not padding:
        return base
    return base + fmtstr(padding, **self.shared_atts)
def controlPoints(self):
    """
    :remarks    Generates the control points for this path
    :return     <list> [ <tuple> ( <float> x, <float> y), .. ]

    Fixes over the previous revision:

    * bottom -> bottom used ``max(out_y + 2*pad, out_y + 2*pad)`` — the
      same operand twice — instead of the ``out_bot``/``in_bot`` pair used
      by every other same-side case.
    * the centre-squash branches tested ``if mid_y:`` / ``if mid_x:``,
      which wrongly discarded a legitimate midpoint at coordinate 0.0;
      they now test ``is not None``.
    """
    # define input variables
    in_point = self.inputPoint()
    in_rect = self.inputRect()
    in_x = in_point.x()
    in_y = in_point.y()
    in_cx = in_rect.center().x()
    in_cy = in_rect.center().y()
    in_left = in_rect.left()
    in_right = in_rect.right()
    in_top = in_rect.top()
    in_bot = in_rect.bottom()
    in_loc = self.inputLocation()
    # define output variables
    out_point = self.outputPoint()
    out_rect = self.outputRect()
    out_x = out_point.x()
    out_y = out_point.y()
    out_cx = out_rect.center().x()
    out_cy = out_rect.center().y()
    out_left = out_rect.left()
    out_right = out_rect.right()
    out_top = out_rect.top()
    out_bot = out_rect.bottom()
    out_loc = self.outputLocation()
    # define global variables
    pad = self.squashThreshold()
    loc_left = XConnectionLocation.Left
    loc_right = XConnectionLocation.Right
    loc_top = XConnectionLocation.Top
    loc_bot = XConnectionLocation.Bottom
    # calculate deltas
    delta_x = abs(in_x - out_x)
    delta_y = abs(in_y - out_y)
    buffer = 2
    # calculate point scenarios
    # right -> left
    if (out_loc & loc_right) and (in_loc & loc_left) and out_right < in_left:
        # no y change, bounding rects don't overlap
        if delta_y < buffer:
            return [(out_x, out_y), (in_x, in_y)]
        # y change, padding deltas don't overlap
        elif out_right + pad < in_left - pad:
            return [(out_x, out_y), (out_x + delta_x / 2.0, out_y), (out_x + delta_x / 2.0, in_y), (in_x, in_y)]
    # left -> right
    if (out_loc & loc_left) and (in_loc & loc_right) and in_right < out_left:
        # no y change, bounding rects don't overlap
        if delta_y < buffer and in_x < out_x:
            return [(out_x, out_y), (in_x, in_y)]
        # y change, padding deltas don't overlap
        elif in_left + pad < out_right - pad:
            return [(out_x, out_y), (out_x - delta_x / 2.0, out_y), (out_x - delta_x / 2.0, in_y), (in_x, in_y)]
    # bottom -> top
    if (out_loc & loc_bot) and (in_loc & loc_top) and out_bot < in_top:
        # no x change, bounding rects don't overlap
        if delta_x < buffer and out_y < in_y:
            return [(out_x, out_y), (in_x, in_y)]
        # x change, padding deltas don't overlap
        elif out_bot + pad < in_top - pad:
            return [(out_x, out_y), (out_x, out_y + delta_y / 2.0), (in_x, out_y + delta_y / 2.0), (in_x, in_y)]
    # top -> bottom
    if (out_loc & loc_top) and (in_loc & loc_bot) and in_bot < out_top:
        # no x change, bounding rects don't overlap
        if delta_x < buffer and in_y < out_y:
            return [(out_x, out_y), (in_x, in_y)]
        # y change, padding deltas don't overlap
        elif in_bot + pad < out_top - pad:
            return [(out_x, out_y), (out_x, out_y - delta_y / 2.0), (in_x, out_y - delta_y / 2.0), (in_x, in_y)]
    # bottom -> left
    if (out_loc & loc_bot) and (in_loc & loc_left):
        if out_y + pad < in_y and out_x + pad < in_x:
            return [(out_x, out_y), (out_x, in_y), (in_x, in_y)]
    # bottom -> right
    if (out_loc & loc_bot) and (in_loc & loc_right):
        if out_y + pad < in_y and out_x - pad > in_x:
            return [(out_x, out_y), (out_x, in_y), (in_x, in_y)]
    # top -> left
    if (out_loc & loc_top) and (in_loc & loc_left):
        if in_y + pad < out_y and out_x + pad < in_x:
            return [(out_x, out_y), (out_x, in_y), (in_x, in_y)]
    # top -> right
    if (out_loc & loc_top) and (in_loc & loc_right):
        if in_y + pad < out_y and out_x - pad > in_x:
            return [(out_x, out_y), (out_x, in_y), (in_x, in_y)]
    # right -> top
    if (out_loc & loc_right) and (in_loc & loc_top):
        if out_x + pad < in_x and out_y - pad < in_y:
            return [(out_x, out_y), (in_x, out_y), (in_x, in_y)]
    # right -> bottom
    if (out_loc & loc_right) and (in_loc & loc_bot):
        if out_x + pad < in_x and out_y + pad > in_y:
            return [(out_x, out_y), (in_x, out_y), (in_x, in_y)]
    # left -> top
    if (out_loc & loc_left) and (in_loc & loc_top):
        if in_x + pad < out_x and out_y - pad < in_y:
            return [(out_x, out_y), (in_x, out_y), (in_x, in_y)]
    # left -> bottom
    if (out_loc & loc_left) and (in_loc & loc_bot):
        if in_x + pad < out_x and out_y + pad > in_y:
            return [(out_x, out_y), (in_x, out_y), (in_x, in_y)]
    # right -> right
    if (out_loc & loc_right) and (in_loc & loc_right):
        max_x = max(out_right + 2 * pad, in_right + 2 * pad)
        if out_cx <= in_cx or not (out_loc & loc_left and in_loc & loc_left):
            return [(out_x, out_y), (max_x, out_y), (max_x, in_y), (in_x, in_y)]
    # left -> left
    if (out_loc & loc_left) and (in_loc & loc_left):
        min_x = min(out_left - 2 * pad, in_left - 2 * pad)
        return [(out_x, out_y), (min_x, out_y), (min_x, in_y), (in_x, in_y)]
    # top -> top
    if (out_loc & loc_top) and (in_loc & loc_top):
        if out_cy <= in_cy or not (out_loc & loc_bot and in_loc & loc_bot):
            min_y = min(out_top - 2 * pad, in_top - 2 * pad)
            return [(out_x, out_y), (out_x, min_y), (in_x, min_y), (in_x, in_y)]
    # bottom -> bottom
    if (out_loc & loc_bot) and (in_loc & loc_bot):
        # BUG FIX: was max(out_y + 2*pad, out_y + 2*pad) — the same value
        # twice; mirror the out_bot/in_bot pattern of the other cases.
        max_y = max(out_bot + 2 * pad, in_bot + 2 * pad)
        return [(out_x, out_y), (out_x, max_y), (in_x, max_y), (in_x, in_y)]
    # right -> left with center squash
    if (out_loc & loc_right) and (in_loc & loc_left):
        if out_bot < in_top:
            mid_y = out_bot + (in_top - out_bot) / 2.0
        elif in_bot < out_top:
            mid_y = in_bot + (out_top - in_bot) / 2.0
        else:
            mid_y = None
        if mid_y is not None:  # BUG FIX: 0.0 is a valid midpoint
            return [(out_x, out_y), (out_x + 2 * pad, out_y), (out_x + 2 * pad, mid_y), (in_x - 2 * pad, mid_y), (in_x - 2 * pad, in_y), (in_x, in_y)]
    # left -> right with center squash
    if (out_loc & loc_left) and (in_loc & loc_right):
        # NOTE(review): these midpoints extrapolate beyond in_top/out_top
        # rather than averaging like the right->left case — confirm intent.
        if out_bot < in_top:
            mid_y = in_top + (in_top - out_bot) / 2.0
        elif in_bot < out_top:
            mid_y = out_top - (out_top - in_bot) / 2.0
        else:
            mid_y = None
        if mid_y is not None:  # BUG FIX: 0.0 is a valid midpoint
            return [(out_x, out_y), (out_x - 2 * pad, out_y), (out_x - 2 * pad, mid_y), (in_x + 2 * pad, mid_y), (in_x + 2 * pad, in_y), (in_x, in_y)]
    # bottom -> top with center squash
    if (out_loc & loc_bot) and (in_loc & loc_top):
        if out_right < in_left:
            mid_x = out_right + (in_left - out_right) / 2.0
        elif in_right < out_left:
            mid_x = in_right + (out_left - in_right) / 2.0
        else:
            mid_x = None
        if mid_x is not None:  # BUG FIX: 0.0 is a valid midpoint
            return [(out_x, out_y), (out_x, out_y + 2 * pad), (mid_x, out_y + 2 * pad), (mid_x, in_y - 2 * pad), (in_x, in_y - 2 * pad), (in_x, in_y)]
    # top -> bottom with center squash
    if (out_loc & loc_top) and (in_loc & loc_bot):
        if out_right < in_left:
            mid_x = in_left + (in_left - out_right) / 2.0
        elif in_right < out_left:
            mid_x = out_left - (out_left - in_right) / 2.0
        else:
            mid_x = None
        if mid_x is not None:  # BUG FIX: 0.0 is a valid midpoint
            return [(out_x, out_y), (out_x, out_y - 2 * pad), (mid_x, out_y - 2 * pad), (mid_x, in_y + 2 * pad), (in_x, in_y + 2 * pad), (in_x, in_y)]
    # right -> left with looping
    if (out_loc & loc_right) and (in_loc & loc_left):
        max_y = max(out_bot + 2 * pad, in_bot + 2 * pad)
        return [(out_x, out_y), (out_x + 2 * pad, out_y), (out_x + 2 * pad, max_y), (in_x - 2 * pad, max_y), (in_x - 2 * pad, in_y), (in_x, in_y)]
    # left -> right with looping
    if (out_loc & loc_left) and (in_loc & loc_right):
        max_y = max(out_bot + 2 * pad, in_bot + 2 * pad)
        return [(out_x, out_y), (out_x - 2 * pad, out_y), (out_x - 2 * pad, max_y), (in_x + 2 * pad, max_y), (in_x + 2 * pad, in_y), (in_x, in_y)]
    # bottom -> top with looping
    if (out_loc & loc_bot) and (in_loc & loc_top):
        max_x = max(out_right + 2 * pad, in_right + 2 * pad)
        return [(out_x, out_y), (out_x, out_y + 2 * pad), (max_x, out_y + 2 * pad), (max_x, in_y - 2 * pad), (in_x, in_y - 2 * pad), (in_x, in_y)]
    # top -> bottom with looping
    if (out_loc & loc_top) and (in_loc & loc_bot):
        max_x = max(out_right + 2 * pad, in_right + 2 * pad)
        return [(out_x, out_y), (out_x, out_y - 2 * pad), (max_x, out_y - 2 * pad), (max_x, in_y + 2 * pad), (in_x, in_y + 2 * pad), (in_x, in_y)]
    # right -> right with looping
    if (out_loc & loc_right) and (in_loc & loc_right):
        max_y = max(out_bot + 2 * pad, in_bot + 2 * pad)
        mid_x = out_left - abs(out_left - in_right) / 2.0
        return [(out_x, out_y), (out_x + 2 * pad, out_y), (out_x + 2 * pad, max_y), (mid_x, max_y), (mid_x, in_y), (in_x, in_y)]
    # left -> left with looping
    if (out_loc & loc_left) and (in_loc & loc_left):
        max_y = max(out_bot + 2 * pad, in_bot + 2 * pad)
        mid_x = in_left - abs(in_left - out_right) / 2.0
        return [(out_x, out_y), (out_x - 2 * pad, out_y), (out_x - 2 * pad, max_y), (mid_x, max_y), (mid_x, in_y), (in_x, in_y)]
    # unknown, return a direct route
    return [(out_x, out_y), (in_x, in_y)]
def mkres(self):
    """Create a directory tree for the resized assets.

    One ``res/drawable-<density>`` directory is created under ``self.out``
    for each enabled density; ldpi and xxxhdpi are skipped unless their
    corresponding flags are set.  Existing directories (or other OS-level
    failures) are silently ignored, matching best-effort behavior.
    """
    for density in DENSITY_TYPES:
        if density == 'ldpi' and not self.ldpi:
            continue  # skip ldpi
        if density == 'xxxhdpi' and not self.xxxhdpi:
            continue  # skip xxxhdpi
        target = os.path.join(self.out, 'res/drawable-%s' % density)
        try:
            os.makedirs(target, 0o755)
        except OSError:
            pass
def AgregarRetencion(self, codigo_concepto, detalle_aclaratorio, base_calculo, alicuota, nro_certificado_retencion=None, fecha_certificado_retencion=None, importe_certificado_retencion=None, **kwargs):
    """Add the withholding (retencion) information for the settlement.

    Blank/zero optional certificate fields are normalized to None; all
    values are stored as passed (no type conversion) in a new entry
    appended to ``self.retenciones``.  Returns True.
    """
    # normalize the optional fields: blank or zero means "not provided"
    if fecha_certificado_retencion is not None and not fecha_certificado_retencion.strip():
        fecha_certificado_retencion = None
    if importe_certificado_retencion is not None and not float(importe_certificado_retencion):
        importe_certificado_retencion = None
    if nro_certificado_retencion is not None and not int(nro_certificado_retencion):
        nro_certificado_retencion = None
    retencion = {
        'codigoConcepto': codigo_concepto,
        'detalleAclaratorio': detalle_aclaratorio,
        'baseCalculo': base_calculo,
        'alicuota': alicuota,
        'nroCertificadoRetencion': nro_certificado_retencion,
        'fechaCertificadoRetencion': fecha_certificado_retencion,
        'importeCertificadoRetencion': importe_certificado_retencion,
    }
    self.retenciones.append({'retencion': retencion})
    return True
def orientation_magic ( or_con = 1 , dec_correction_con = 1 , dec_correction = 0 , bed_correction = True , samp_con = '1' , hours_from_gmt = 0 , method_codes = '' , average_bedding = False , orient_file = 'orient.txt' , samp_file = 'samples.txt' , site_file = 'sites.txt' , output_dir_path = '.' , input_dir_path = '' , append = False , data_model = 3 ) :
"""use this function to convert tab delimited field notebook information to MagIC formatted tables ( er _ samples and er _ sites )
INPUT FORMAT
Input files must be tab delimited and have in the first line :
tab location _ name
Note : The " location _ name " will facilitate searching in the MagIC database . Data from different
" locations " should be put in separate files . The definition of a " location " is rather loose .
Also this is the word ' tab ' not a tab , which will be indicated by ' \t ' .
The second line has the names of the columns ( tab delimited ) , e . g . :
site _ name sample _ name mag _ azimuth field _ dip date lat long sample _ lithology sample _ type sample _ class shadow _ angle hhmm stratigraphic _ height bedding _ dip _ direction bedding _ dip GPS _ baseline image _ name image _ look image _ photographer participants method _ codes site _ description sample _ description GPS _ Az , sample _ igsn , sample _ texture , sample _ cooling _ rate , cooling _ rate _ corr , cooling _ rate _ mcd
Notes :
1 ) column order doesn ' t matter but the NAMES do .
2 ) sample _ name , sample _ lithology , sample _ type , sample _ class , lat and long are required . all others are optional .
3 ) If subsequent data are the same ( e . g . , date , bedding orientation , participants , stratigraphic _ height ) ,
you can leave the field blank and the program will fill in the last recorded information . BUT if you really want a blank stratigraphic _ height , enter a ' - 1 ' . These will not be inherited and must be specified for each entry : image _ name , look , photographer or method _ codes
4 ) hhmm must be in the format : hh : mm and the hh must be in 24 hour time .
date must be mm / dd / yy ( years < 50 will be converted to 20yy and > 50 will be assumed 19yy ) . hours _ from _ gmt is the number of hours to SUBTRACT from hh to get to GMT .
5 ) image _ name , image _ look and image _ photographer are colon delimited lists of file name ( e . g . , IMG _ 001 . jpg ) image look direction and the name of the photographer respectively . If all images had same look and photographer , just enter info once . The images will be assigned to the site for which they were taken - not at the sample level .
6 ) participants : Names of who helped take the samples . These must be a colon delimited list .
7 ) method _ codes : Special method codes on a sample level , e . g . , SO - GT5 which means the orientation is has an uncertainty of > 5 degrees
for example if it broke off before orienting . . . .
8 ) GPS _ Az is the place to put directly determined GPS Azimuths , using , e . g . , points along the drill direction .
9 ) sample _ cooling _ rate is the cooling rate in K per Ma
10 ) int _ corr _ cooling _ rate
11 ) cooling _ rate _ mcd : data adjustment method code for cooling rate correction ; DA - CR - EG is educated guess ; DA - CR - PS is percent estimated from pilot samples ; DA - CR - TRM is comparison between 2 TRMs acquired with slow and rapid cooling rates .
is the percent cooling rate factor to apply to specimens from this sample , DA - CR - XX is the method code
defaults :
orientation _ magic ( or _ con = 1 , dec _ correction _ con = 1 , dec _ correction = 0 , bed _ correction = True , samp _ con = ' 1 ' , hours _ from _ gmt = 0 , method _ codes = ' ' , average _ bedding = False , orient _ file = ' orient . txt ' , samp _ file = ' er _ samples . txt ' , site _ file = ' er _ sites . txt ' , output _ dir _ path = ' . ' , input _ dir _ path = ' ' , append = False ) :
orientation conventions :
[1 ] Standard Pomeroy convention of azimuth and hade ( degrees from vertical down )
of the drill direction ( field arrow ) . lab arrow azimuth = sample _ azimuth = mag _ azimuth ;
lab arrow dip = sample _ dip = - field _ dip . i . e . the lab arrow dip is minus the hade .
[2 ] Field arrow is the strike of the plane orthogonal to the drill direction ,
Field dip is the hade of the drill direction . Lab arrow azimuth = mag _ azimuth - 90
Lab arrow dip = - field _ dip
[3 ] Lab arrow is the same as the drill direction ;
hade was measured in the field .
Lab arrow azimuth = mag _ azimuth ; Lab arrow dip = 90 - field _ dip
[4 ] lab azimuth and dip are same as mag _ azimuth , field _ dip : use this for unoriented samples too
[5 ] Same as AZDIP convention explained below -
azimuth and inclination of the drill direction are mag _ azimuth and field _ dip ;
lab arrow is as in [ 1 ] above .
lab azimuth is same as mag _ azimuth , lab arrow dip = field _ dip - 90
[6 ] Lab arrow azimuth = mag _ azimuth - 90 ; Lab arrow dip = 90 - field _ dip
[7 ] see http : / / earthref . org / PmagPy / cookbook / # field _ info for more information . You can customize other format yourself , or email ltauxe @ ucsd . edu for help .
Magnetic declination convention :
[1 ] Use the IGRF value at the lat / long and date supplied [ default ]
[2 ] Will supply declination correction
[3 ] mag _ az is already corrected in file
[4 ] Correct mag _ az but not bedding _ dip _ dir
Sample naming convention :
[1 ] XXXXY : where XXXX is an arbitrary length site designation and Y
is the single character sample designation . e . g . , TG001a is the
first sample from site TG001 . [ default ]
[2 ] XXXX - YY : YY sample from site XXXX ( XXX , YY of arbitary length )
[3 ] XXXX . YY : YY sample from site XXXX ( XXX , YY of arbitary length )
[4 - Z ] XXXX [ YYY ] : YYY is sample designation with Z characters from site XXX
[5 ] site name = sample name
[6 ] site name entered in site _ name column in the orient . txt format input file - - NOT CURRENTLY SUPPORTED
[7 - Z ] [ XXX ] YYY : XXX is site designation with Z characters from samples XXXYYY
NB : all others you will have to either customize your
self or e - mail ltauxe @ ucsd . edu for help .""" | # initialize some variables
# bed _ correction used to be BedCorr
# dec _ correction _ con used to be corr
# dec _ correction used to be DecCorr
# meths is now method _ codes
# delta _ u is now hours _ from _ gmt
input_dir_path , output_dir_path = pmag . fix_directories ( input_dir_path , output_dir_path )
or_con , dec_correction_con , dec_correction = int ( or_con ) , int ( dec_correction_con ) , float ( dec_correction )
hours_from_gmt = float ( hours_from_gmt )
stratpos = ""
# date of sampling , latitude ( pos North ) , longitude ( pos East )
date , lat , lon = "" , "" , ""
bed_dip , bed_dip_dir = "" , ""
Lats , Lons = [ ] , [ ]
# list of latitudes and longitudes
# lists of Sample records and Site records
SampOuts , SiteOuts , ImageOuts = [ ] , [ ] , [ ]
samplelist , sitelist , imagelist = [ ] , [ ] , [ ]
Z = 1
newbaseline , newbeddir , newbeddip = "" , "" , ""
fpars = [ ]
sclass , lithology , sample_type = "" , "" , ""
newclass , newlith , newtype = '' , '' , ''
BPs = [ ]
# bedding pole declinations , bedding pole inclinations
image_file = "er_images.txt"
# use 3.0 . default filenames when in 3.0.
# but , still allow for custom names
data_model = int ( data_model )
if data_model == 3 :
if samp_file == "er_samples.txt" :
samp_file = "samples.txt"
if site_file == "er_sites.txt" :
site_file = "sites.txt"
image_file = "images.txt"
orient_file = pmag . resolve_file_name ( orient_file , input_dir_path )
if not os . path . exists ( orient_file ) :
return False , "No such file: {}. If the orientation file is not in your current working directory, make sure you have specified the correct input directory." . format ( orient_file )
samp_file = os . path . join ( output_dir_path , samp_file )
site_file = os . path . join ( output_dir_path , site_file )
image_file = os . path . join ( output_dir_path , image_file )
# validate input
if '4' in samp_con [ 0 ] :
pattern = re . compile ( '[4][-]\d' )
result = pattern . match ( samp_con )
if not result :
raise Exception ( "If using sample naming convention 4, you must provide the number of characters with which to distinguish sample from site. [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX)" )
if '7' in samp_con [ 0 ] :
pattern = re . compile ( '[7][-]\d' )
result = pattern . match ( samp_con )
if not result :
raise Exception ( "If using sample naming convention 7, you must provide the number of characters with which to distinguish sample from site. [7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY" )
if dec_correction_con == 2 and not dec_correction :
raise Exception ( "If using magnetic declination convention 2, you must also provide a declincation correction in degrees" )
SampRecs , SiteRecs , ImageRecs = [ ] , [ ] , [ ]
SampRecs_sorted , SiteRecs_sorted = { } , { }
if append :
try :
SampRecs , file_type = pmag . magic_read ( samp_file )
# convert 3.0 . sample file to 2.5 format
if data_model == 3 :
SampRecs3 = SampRecs
SampRecs = [ ]
for samp_rec in SampRecs3 :
rec = map_magic . mapping ( samp_rec , map_magic . samp_magic3_2_magic2_map )
SampRecs . append ( rec )
# magic _ data dictionary sorted by sample _ name
SampRecs_sorted = pmag . sort_magic_data ( SampRecs , 'er_sample_name' )
print ( 'sample data to be appended to: ' , samp_file )
except Exception as ex :
print ( ex )
print ( 'problem with existing file: ' , samp_file , ' will create new.' )
try :
SiteRecs , file_type = pmag . magic_read ( site_file )
# convert 3.0 . site file to 2.5 format
if data_model == 3 :
SiteRecs3 = SiteRecs
SiteRecs = [ ]
for site_rec in SiteRecs3 :
SiteRecs . append ( map_magic . mapping ( site_rec , map_magic . site_magic3_2_magic2_map ) )
# magic _ data dictionary sorted by site _ name
SiteRecs_sorted = pmag . sort_magic_data ( SiteRecs , 'er_site_name' )
print ( 'site data to be appended to: ' , site_file )
except Exception as ex :
print ( ex )
print ( 'problem with existing file: ' , site_file , ' will create new.' )
try :
ImageRecs , file_type = pmag . magic_read ( image_file )
# convert from 3.0 . - - > 2.5
if data_model == 3 :
ImageRecs3 = ImageRecs
ImageRecs = [ ]
for image_rec in ImageRecs3 :
ImageRecs . append ( map_magic . mapping ( image_rec , map_magic . image_magic3_2_magic2_map ) )
print ( 'image data to be appended to: ' , image_file )
except :
print ( 'problem with existing file: ' , image_file , ' will create new.' )
# read in file to convert
OrData , location_name = pmag . magic_read ( orient_file )
if location_name == "demag_orient" :
location_name = ""
# step through the data sample by sample
# use map _ magic in here . . .
for OrRec in OrData :
if 'mag_azimuth' not in list ( OrRec . keys ( ) ) :
OrRec [ 'mag_azimuth' ] = ""
if 'field_dip' not in list ( OrRec . keys ( ) ) :
OrRec [ 'field_dip' ] = ""
if OrRec [ 'mag_azimuth' ] == " " :
OrRec [ "mag_azimuth" ] = ""
if OrRec [ 'field_dip' ] == " " :
OrRec [ "field_dip" ] = ""
if 'sample_description' in list ( OrRec . keys ( ) ) :
sample_description = OrRec [ 'sample_description' ]
else :
sample_description = ""
if 'cooling_rate_corr' in list ( OrRec . keys ( ) ) :
if 'cooling_rate_mcd' not in list ( OrRec . keys ( ) ) :
OrRec [ 'cooling_rate_mcd' ] = 'DA-CR'
sample_orientation_flag = 'g'
if 'sample_orientation_flag' in list ( OrRec . keys ( ) ) :
if OrRec [ 'sample_orientation_flag' ] == 'b' or OrRec [ "mag_azimuth" ] == "" :
sample_orientation_flag = 'b'
methcodes = method_codes
# initialize method codes
if methcodes :
if 'method_codes' in list ( OrRec . keys ( ) ) and OrRec [ 'method_codes' ] . strip ( ) != "" :
methcodes = methcodes + ":" + OrRec [ 'method_codes' ]
# add notes
else :
if 'method_codes' in list ( OrRec . keys ( ) ) and OrRec [ 'method_codes' ] . strip ( ) != "" :
methcodes = OrRec [ 'method_codes' ]
# add notes
codes = methcodes . replace ( " " , "" ) . split ( ":" )
sample_name = OrRec [ "sample_name" ]
# patch added by rshaar 7/2016
# if sample _ name already exists in er _ samples . txt :
# merge the new data colmuns calculated by orientation _ magic with the existing data colmuns
# this is done to make sure no previous data in er _ samples . txt and
# er _ sites . txt is lost .
if sample_name in list ( SampRecs_sorted . keys ( ) ) :
Prev_MagRec = SampRecs_sorted [ sample_name ] [ - 1 ]
MagRec = Prev_MagRec
else :
Prev_MagRec = { }
MagRec = { }
MagRec [ "er_citation_names" ] = "This study"
# the following keys were calculated or defined in the code above :
for key in [ 'sample_igsn' , 'sample_texture' , 'sample_cooling_rate' , 'cooling_rate_corr' , 'cooling_rate_mcd' ] :
val = OrRec . get ( key , '' )
if val :
MagRec [ key ] = val
elif key in list ( Prev_MagRec . keys ( ) ) :
MagRec [ key ] = Prev_MagRec [ key ]
else :
MagRec [ key ] = ""
if location_name != "" :
MagRec [ "er_location_name" ] = location_name
elif "er_location_name" in list ( Prev_MagRec . keys ( ) ) :
MagRec [ "er_location_name" ] = Prev_MagRec [ "er_location_name" ]
else :
MagRec [ "er_location_name" ] = ""
# the following keys are taken directly from OrRec dictionary :
for key in [ "sample_height" , "er_sample_alternatives" , "sample_orientation_flag" ] :
if key in list ( OrRec . keys ( ) ) and OrRec [ key ] != "" :
MagRec [ key ] = OrRec [ key ]
elif key in list ( Prev_MagRec . keys ( ) ) :
MagRec [ key ] = Prev_MagRec [ key ]
else :
MagRec [ key ] = ""
# the following keys , if blank , used to be defined here as " Not Specified " :
for key in [ "sample_class" , "sample_lithology" , "sample_type" ] :
if key in list ( OrRec . keys ( ) ) and OrRec [ key ] != "" and OrRec [ key ] != "Not Specified" :
MagRec [ key ] = OrRec [ key ]
elif key in list ( Prev_MagRec . keys ( ) ) and Prev_MagRec [ key ] != "" and Prev_MagRec [ key ] != "Not Specified" :
MagRec [ key ] = Prev_MagRec [ key ]
else :
MagRec [ key ] = ""
# " Not Specified "
# ( rshaar ) From here parse new information and replace previous , if exists :
# parse information common to all orientation methods
MagRec [ "er_sample_name" ] = OrRec [ "sample_name" ]
if "IGSN" in list ( OrRec . keys ( ) ) :
MagRec [ "sample_igsn" ] = OrRec [ "IGSN" ]
else :
MagRec [ "sample_igsn" ] = ""
# MagRec [ " sample _ height " ] , MagRec [ " sample _ bed _ dip _ direction " ] , MagRec [ " sample _ bed _ dip " ] = " " , " " , " "
MagRec [ "sample_bed_dip_direction" ] , MagRec [ "sample_bed_dip" ] = "" , ""
# if " er _ sample _ alternatives " in OrRec . keys ( ) :
# MagRec [ " er _ sample _ alternatives " ] = OrRec [ " sample _ alternatives " ]
sample = OrRec [ "sample_name" ]
if OrRec [ 'mag_azimuth' ] == "" and OrRec [ 'field_dip' ] != "" :
OrRec [ 'mag_azimuth' ] = '999'
if OrRec [ "mag_azimuth" ] != "" :
labaz , labdip = pmag . orient ( float ( OrRec [ "mag_azimuth" ] ) , float ( OrRec [ "field_dip" ] ) , or_con )
if labaz < 0 :
labaz += 360.
else :
labaz , labdip = "" , ""
if OrRec [ 'mag_azimuth' ] == '999' :
labaz = ""
if "GPS_baseline" in list ( OrRec . keys ( ) ) and OrRec [ 'GPS_baseline' ] != "" :
newbaseline = OrRec [ "GPS_baseline" ]
if newbaseline != "" :
baseline = float ( newbaseline )
MagRec [ 'er_scientist_mail_names' ] = OrRec . get ( 'participants' , '' )
newlat = OrRec [ "lat" ]
if newlat != "" :
lat = float ( newlat )
if lat == "" :
print ( "No latitude specified for ! " , sample , ". Latitude is required for all samples." )
return False , "No latitude specified for ! " + sample + ". Latitude is required for all samples."
MagRec [ "sample_lat" ] = '%11.5f' % ( lat )
newlon = OrRec [ "long" ]
if newlon != "" :
lon = float ( newlon )
if lon == "" :
print ( "No longitude specified for ! " , sample , ". Longitude is required for all samples." )
return False , str ( "No longitude specified for ! " + sample + ". Longitude is required for all samples." )
MagRec [ "sample_lon" ] = '%11.5f' % ( lon )
if 'bedding_dip_direction' in list ( OrRec . keys ( ) ) :
newbeddir = OrRec [ "bedding_dip_direction" ]
if newbeddir != "" :
bed_dip_dir = OrRec [ 'bedding_dip_direction' ]
if 'bedding_dip' in list ( OrRec . keys ( ) ) :
newbeddip = OrRec [ "bedding_dip" ]
if newbeddip != "" :
bed_dip = OrRec [ 'bedding_dip' ]
MagRec [ "sample_bed_dip" ] = bed_dip
MagRec [ "sample_bed_dip_direction" ] = bed_dip_dir
# MagRec [ " sample _ type " ] = sample _ type
if labdip != "" :
MagRec [ "sample_dip" ] = '%7.1f' % labdip
else :
MagRec [ "sample_dip" ] = ""
if "date" in list ( OrRec . keys ( ) ) and OrRec [ "date" ] != "" :
newdate = OrRec [ "date" ]
if newdate != "" :
date = newdate
mmddyy = date . split ( '/' )
yy = int ( mmddyy [ 2 ] )
if yy > 50 :
yy = 1900 + yy
else :
yy = 2000 + yy
decimal_year = yy + old_div ( float ( mmddyy [ 0 ] ) , 12 )
sample_date = '%i:%s:%s' % ( yy , mmddyy [ 0 ] , mmddyy [ 1 ] )
time = OrRec [ 'hhmm' ]
if time :
sample_date += ( ':' + time )
MagRec [ "sample_date" ] = sample_date . strip ( ':' )
if labaz != "" :
MagRec [ "sample_azimuth" ] = '%7.1f' % ( labaz )
else :
MagRec [ "sample_azimuth" ] = ""
if "stratigraphic_height" in list ( OrRec . keys ( ) ) :
if OrRec [ "stratigraphic_height" ] != "" :
MagRec [ "sample_height" ] = OrRec [ "stratigraphic_height" ]
stratpos = OrRec [ "stratigraphic_height" ]
elif OrRec [ "stratigraphic_height" ] == '-1' :
MagRec [ "sample_height" ] = ""
# make empty
elif stratpos != "" : # keep last record if blank
MagRec [ "sample_height" ] = stratpos
# get magnetic declination ( corrected with igrf value )
if dec_correction_con == 1 and MagRec [ 'sample_azimuth' ] != "" :
x , y , z , f = pmag . doigrf ( lon , lat , 0 , decimal_year )
Dir = pmag . cart2dir ( ( x , y , z ) )
dec_correction = Dir [ 0 ]
if "bedding_dip" in list ( OrRec . keys ( ) ) :
if OrRec [ "bedding_dip" ] != "" :
MagRec [ "sample_bed_dip" ] = OrRec [ "bedding_dip" ]
bed_dip = OrRec [ "bedding_dip" ]
else :
MagRec [ "sample_bed_dip" ] = bed_dip
else :
MagRec [ "sample_bed_dip" ] = '0'
if "bedding_dip_direction" in list ( OrRec . keys ( ) ) :
if OrRec [ "bedding_dip_direction" ] != "" and bed_correction == 1 :
dd = float ( OrRec [ "bedding_dip_direction" ] ) + dec_correction
if dd > 360. :
dd = dd - 360.
MagRec [ "sample_bed_dip_direction" ] = '%7.1f' % ( dd )
dip_dir = MagRec [ "sample_bed_dip_direction" ]
else :
MagRec [ "sample_bed_dip_direction" ] = OrRec [ 'bedding_dip_direction' ]
else :
MagRec [ "sample_bed_dip_direction" ] = '0'
if average_bedding :
if str ( MagRec [ "sample_bed_dip_direction" ] ) and str ( MagRec [ "sample_bed_dip" ] ) :
BPs . append ( [ float ( MagRec [ "sample_bed_dip_direction" ] ) , float ( MagRec [ "sample_bed_dip" ] ) - 90. , 1. ] )
if MagRec [ 'sample_azimuth' ] == "" and MagRec [ 'sample_dip' ] == "" :
MagRec [ "sample_declination_correction" ] = ''
methcodes = methcodes + ':SO-NO'
MagRec [ "magic_method_codes" ] = methcodes
MagRec [ 'sample_description' ] = sample_description
# work on the site stuff too
if 'site_name' in list ( OrRec . keys ( ) ) and OrRec [ 'site_name' ] != "" :
site = OrRec [ 'site_name' ]
elif 'site_name' in list ( Prev_MagRec . keys ( ) ) and Prev_MagRec [ 'site_name' ] != "" :
site = Prev_MagRec [ 'site_name' ]
else : # parse out the site name
site = pmag . parse_site ( OrRec [ "sample_name" ] , samp_con , Z )
MagRec [ "er_site_name" ] = site
site_description = ""
# overwrite any prior description
if 'site_description' in list ( OrRec . keys ( ) ) and OrRec [ 'site_description' ] != "" :
site_description = OrRec [ 'site_description' ] . replace ( "," , ";" )
if "image_name" in list ( OrRec . keys ( ) ) :
images = OrRec [ "image_name" ] . split ( ":" )
if "image_look" in list ( OrRec . keys ( ) ) :
looks = OrRec [ 'image_look' ] . split ( ":" )
else :
looks = [ ]
if "image_photographer" in list ( OrRec . keys ( ) ) :
photographers = OrRec [ 'image_photographer' ] . split ( ":" )
else :
photographers = [ ]
for image in images :
if image != "" and image not in imagelist :
imagelist . append ( image )
ImageRec = { }
ImageRec [ 'er_image_name' ] = image
ImageRec [ 'image_type' ] = "outcrop"
ImageRec [ 'image_date' ] = sample_date
ImageRec [ 'er_citation_names' ] = "This study"
ImageRec [ 'er_location_name' ] = location_name
ImageRec [ 'er_site_name' ] = MagRec [ 'er_site_name' ]
k = images . index ( image )
if len ( looks ) > k :
ImageRec [ 'er_image_description' ] = "Look direction: " + looks [ k ]
elif len ( looks ) >= 1 :
ImageRec [ 'er_image_description' ] = "Look direction: " + looks [ - 1 ]
else :
ImageRec [ 'er_image_description' ] = "Look direction: unknown"
if len ( photographers ) > k :
ImageRec [ 'er_photographer_mail_names' ] = photographers [ k ]
elif len ( photographers ) >= 1 :
ImageRec [ 'er_photographer_mail_names' ] = photographers [ - 1 ]
else :
ImageRec [ 'er_photographer_mail_names' ] = "unknown"
ImageOuts . append ( ImageRec )
if site not in sitelist :
sitelist . append ( site )
# collect unique site names
# patch added by rshaar 7/2016
# if sample _ name already exists in er _ samples . txt :
# merge the new data colmuns calculated by orientation _ magic with the existing data colmuns
# this is done to make sure no previous data in er _ samples . txt and
# er _ sites . txt is lost .
if site in list ( SiteRecs_sorted . keys ( ) ) :
Prev_MagRec = SiteRecs_sorted [ site ] [ - 1 ]
SiteRec = Prev_MagRec
else :
Prev_MagRec = { }
SiteRec = { }
SiteRec [ "er_citation_names" ] = "This study"
SiteRec [ "er_site_name" ] = site
SiteRec [ "site_definition" ] = "s"
if "er_location_name" in SiteRec and SiteRec . get ( "er_location_name" ) :
pass
elif key in list ( Prev_MagRec . keys ( ) ) and Prev_MagRec [ key ] != "" :
SiteRec [ key ] = Prev_MagRec [ key ]
else :
print ( 'setting location name to ""' )
SiteRec [ key ] = ""
for key in [ "lat" , "lon" , "height" ] :
if "site_" + key in list ( Prev_MagRec . keys ( ) ) and Prev_MagRec [ "site_" + key ] != "" :
SiteRec [ "site_" + key ] = Prev_MagRec [ "site_" + key ]
else :
SiteRec [ "site_" + key ] = MagRec [ "sample_" + key ]
# SiteRec [ " site _ lat " ] = MagRec [ " sample _ lat " ]
# SiteRec [ " site _ lon " ] = MagRec [ " sample _ lon " ]
# SiteRec [ " site _ height " ] = MagRec [ " sample _ height " ]
for key in [ "class" , "lithology" , "type" ] :
if "site_" + key in list ( Prev_MagRec . keys ( ) ) and Prev_MagRec [ "site_" + key ] != "Not Specified" :
SiteRec [ "site_" + key ] = Prev_MagRec [ "site_" + key ]
else :
SiteRec [ "site_" + key ] = MagRec [ "sample_" + key ]
# SiteRec [ " site _ class " ] = MagRec [ " sample _ class " ]
# SiteRec [ " site _ lithology " ] = MagRec [ " sample _ lithology " ]
# SiteRec [ " site _ type " ] = MagRec [ " sample _ type " ]
if site_description != "" : # overwrite only if site _ description has something
SiteRec [ "site_description" ] = site_description
SiteOuts . append ( SiteRec )
if sample not in samplelist :
samplelist . append ( sample )
if MagRec [ 'sample_azimuth' ] != "" : # assume magnetic compass only
MagRec [ 'magic_method_codes' ] = MagRec [ 'magic_method_codes' ] + ':SO-MAG'
MagRec [ 'magic_method_codes' ] = MagRec [ 'magic_method_codes' ] . strip ( ":" )
SampOuts . append ( MagRec )
if MagRec [ 'sample_azimuth' ] != "" and dec_correction_con != 3 :
az = labaz + dec_correction
if az > 360. :
az = az - 360.
CMDRec = { }
for key in list ( MagRec . keys ( ) ) :
CMDRec [ key ] = MagRec [ key ]
# make a copy of MagRec
CMDRec [ "sample_azimuth" ] = '%7.1f' % ( az )
CMDRec [ "magic_method_codes" ] = methcodes + ':SO-CMD-NORTH'
CMDRec [ "magic_method_codes" ] = CMDRec [ 'magic_method_codes' ] . strip ( ':' )
CMDRec [ "sample_declination_correction" ] = '%7.1f' % ( dec_correction )
if dec_correction_con == 1 :
CMDRec [ 'sample_description' ] = sample_description + ':Declination correction calculated from IGRF'
else :
CMDRec [ 'sample_description' ] = sample_description + ':Declination correction supplied by user'
CMDRec [ "sample_description" ] = CMDRec [ 'sample_description' ] . strip ( ':' )
SampOuts . append ( CMDRec )
if "mag_az_bs" in list ( OrRec . keys ( ) ) and OrRec [ "mag_az_bs" ] != "" and OrRec [ "mag_az_bs" ] != " " :
SRec = { }
for key in list ( MagRec . keys ( ) ) :
SRec [ key ] = MagRec [ key ]
# make a copy of MagRec
labaz = float ( OrRec [ "mag_az_bs" ] )
az = labaz + dec_correction
if az > 360. :
az = az - 360.
SRec [ "sample_azimuth" ] = '%7.1f' % ( az )
SRec [ "sample_declination_correction" ] = '%7.1f' % ( dec_correction )
SRec [ "magic_method_codes" ] = methcodes + ':SO-SIGHT-BACK:SO-CMD-NORTH'
SampOuts . append ( SRec )
# check for suncompass data
# there are sun compass data
if "shadow_angle" in list ( OrRec . keys ( ) ) and OrRec [ "shadow_angle" ] != "" :
if hours_from_gmt == "" : # hours _ from _ gmt = raw _ input ( " Enter hours to subtract from time for GMT : [ 0 ] " )
hours_from_gmt = 0
SunRec , sundata = { } , { }
shad_az = float ( OrRec [ "shadow_angle" ] )
if not OrRec [ "hhmm" ] :
print ( 'If using the column shadow_angle for sun compass data, you must also provide the time for each sample. Sample ' , sample , ' has shadow_angle but is missing the "hh:mm" column.' )
else : # calculate sun declination
sundata [ "date" ] = '%i:%s:%s:%s' % ( yy , mmddyy [ 0 ] , mmddyy [ 1 ] , OrRec [ "hhmm" ] )
sundata [ "delta_u" ] = hours_from_gmt
sundata [ "lon" ] = lon
# do not truncate !
sundata [ "lat" ] = lat
# do not truncate !
sundata [ "shadow_angle" ] = OrRec [ "shadow_angle" ]
# now you can truncate
sundec = '%7.1f' % ( pmag . dosundec ( sundata ) )
for key in list ( MagRec . keys ( ) ) :
SunRec [ key ] = MagRec [ key ]
# make a copy of MagRec
SunRec [ "sample_azimuth" ] = sundec
# do not truncate !
SunRec [ "sample_declination_correction" ] = ''
SunRec [ "magic_method_codes" ] = methcodes + ':SO-SUN'
SunRec [ "magic_method_codes" ] = SunRec [ 'magic_method_codes' ] . strip ( ':' )
SampOuts . append ( SunRec )
# check for differential GPS data
# there are diff GPS data
if "prism_angle" in list ( OrRec . keys ( ) ) and OrRec [ "prism_angle" ] != "" :
GPSRec = { }
for key in list ( MagRec . keys ( ) ) :
GPSRec [ key ] = MagRec [ key ]
# make a copy of MagRec
prism_angle = float ( OrRec [ "prism_angle" ] )
sundata [ "shadow_angle" ] = OrRec [ "shadow_angle" ]
sundec = pmag . dosundec ( sundata )
for key in list ( MagRec . keys ( ) ) :
SunRec [ key ] = MagRec [ key ]
# make a copy of MagRec
SunRec [ "sample_azimuth" ] = '%7.1f' % ( sundec )
SunRec [ "sample_declination_correction" ] = ''
SunRec [ "magic_method_codes" ] = methcodes + ':SO-SUN'
SunRec [ "magic_method_codes" ] = SunRec [ 'magic_method_codes' ] . strip ( ':' )
SampOuts . append ( SunRec )
# check for differential GPS data
# there are diff GPS data
if "prism_angle" in list ( OrRec . keys ( ) ) and OrRec [ "prism_angle" ] != "" :
GPSRec = { }
for key in list ( MagRec . keys ( ) ) :
GPSRec [ key ] = MagRec [ key ]
# make a copy of MagRec
prism_angle = float ( OrRec [ "prism_angle" ] )
laser_angle = float ( OrRec [ "laser_angle" ] )
if OrRec [ "GPS_baseline" ] != "" :
baseline = float ( OrRec [ "GPS_baseline" ] )
# new baseline
gps_dec = baseline + laser_angle + prism_angle - 90.
while gps_dec > 360. :
gps_dec = gps_dec - 360.
while gps_dec < 0 :
gps_dec = gps_dec + 360.
for key in list ( MagRec . keys ( ) ) :
GPSRec [ key ] = MagRec [ key ]
# make a copy of MagRec
GPSRec [ "sample_azimuth" ] = '%7.1f' % ( gps_dec )
GPSRec [ "sample_declination_correction" ] = ''
GPSRec [ "magic_method_codes" ] = methcodes + ':SO-GPS-DIFF'
SampOuts . append ( GPSRec )
# there are differential GPS Azimuth data
if "GPS_Az" in list ( OrRec . keys ( ) ) and OrRec [ "GPS_Az" ] != "" :
GPSRec = { }
for key in list ( MagRec . keys ( ) ) :
GPSRec [ key ] = MagRec [ key ]
# make a copy of MagRec
GPSRec [ "sample_azimuth" ] = '%7.1f' % ( float ( OrRec [ "GPS_Az" ] ) )
GPSRec [ "sample_declination_correction" ] = ''
GPSRec [ "magic_method_codes" ] = methcodes + ':SO-GPS-DIFF'
SampOuts . append ( GPSRec )
if average_bedding != "0" and fpars :
fpars = pmag . fisher_mean ( BPs )
print ( 'over-writing all bedding with average ' )
Samps = [ ]
for rec in SampOuts :
if average_bedding != "0" and fpars :
rec [ 'sample_bed_dip_direction' ] = '%7.1f' % ( fpars [ 'dec' ] )
rec [ 'sample_bed_dip' ] = '%7.1f' % ( fpars [ 'inc' ] + 90. )
Samps . append ( rec )
else :
Samps . append ( rec )
for rec in SampRecs :
if rec [ 'er_sample_name' ] not in samplelist : # overwrite prior for this sample
Samps . append ( rec )
for rec in SiteRecs :
if rec [ 'er_site_name' ] not in sitelist : # overwrite prior for this sample
SiteOuts . append ( rec )
for rec in ImageRecs :
if rec [ 'er_image_name' ] not in imagelist : # overwrite prior for this sample
ImageOuts . append ( rec )
print ( 'saving data...' )
SampsOut , keys = pmag . fillkeys ( Samps )
Sites , keys = pmag . fillkeys ( SiteOuts )
if data_model == 3 :
SampsOut3 = [ ]
Sites3 = [ ]
for samp_rec in SampsOut :
new_rec = map_magic . mapping ( samp_rec , map_magic . samp_magic2_2_magic3_map )
SampsOut3 . append ( new_rec )
for site_rec in Sites :
new_rec = map_magic . mapping ( site_rec , map_magic . site_magic2_2_magic3_map )
Sites3 . append ( new_rec )
wrote_samps = pmag . magic_write ( samp_file , SampsOut3 , "samples" )
wrote_sites = pmag . magic_write ( site_file , Sites3 , "sites" )
else :
wrote_samps = pmag . magic_write ( samp_file , SampsOut , "er_samples" )
wrote_sites = pmag . magic_write ( site_file , Sites , "er_sites" )
if wrote_samps :
print ( "Data saved in " , samp_file , ' and ' , site_file )
else :
print ( "No data found" )
if len ( ImageOuts ) > 0 : # need to do conversion here 3.0 . - - > 2.5
Images , keys = pmag . fillkeys ( ImageOuts )
image_type = "er_images"
if data_model == 3 : # convert 2.5 - - > 3.0.
image_type = "images"
Images2 = Images
Images = [ ]
for image_rec in Images2 :
Images . append ( map_magic . mapping ( image_rec , map_magic . image_magic2_2_magic3_map ) )
pmag . magic_write ( image_file , Images , image_type )
print ( "Image info saved in " , image_file )
return True , None |
def get_collection_datasets(collection_id, **kwargs):
    """Return every Dataset belonging to the collection with the given id.

    Joins Dataset -> DatasetCollectionItem -> DatasetCollection so that only
    datasets linked to ``collection_id`` are returned.
    """
    query = db.DBSession.query(Dataset).filter(
        Dataset.id == DatasetCollectionItem.dataset_id,
        DatasetCollectionItem.collection_id == DatasetCollection.id,
        DatasetCollection.id == collection_id,
    )
    return query.all()
def update_docs(readme=True, makefiles=True):
    """Update documentation (ready for publishing new release).

    Usually called by ``make docs``.

    :param bool readme: regenerate ``README.md`` from ``README.rst``
    :param bool makefiles: regenerate DOC pages from Makefile help messages
    """
    if readme:
        pandoc_bin = get_external_executable("pandoc")
        rst2markdown_github(
            os.path.join(_HERE, "README.rst"),
            os.path.join(_HERE, "README.md"),
            pandoc=pandoc_bin,
        )
    if not makefiles:
        return
    make_bin = get_external_executable("make")
    project_makefile_dir = os.path.abspath(_HERE)
    project_makefile_rst = os.path.join(_HERE, 'docs', 'src', 'project_makefile.rst')
    docs_makefile_dir = os.path.join(_HERE, 'docs', 'src')
    docs_makefile_rst = os.path.join(_HERE, 'docs', 'src', 'docs_makefile.rst')
    # Each entry: (cwd, help command, target .rst file, rst title of new file)
    help2rst_queue = [
        (project_makefile_dir, [make_bin, "help"], project_makefile_rst, "Project ``Makefile``"),
        (docs_makefile_dir, [make_bin, "help"], docs_makefile_rst, "Documentation ``Makefile``"),
    ]
    for cwd, help_cmd, outfile, title in help2rst_queue:
        console_help2rst(cwd, help_cmd, outfile, title, format_as_code=True)
def serve_forever(self, fork=False):
    """Start handling requests.

    Does not return unless :py:meth:`.shutdown` is called from another
    thread, or *fork* is set (in which case the parent returns at once).

    :param bool fork: Whether to fork or not before serving content.
    :return: The child processes PID if *fork* is set to True.
    :rtype: int
    """
    if fork:
        if not hasattr(os, 'fork'):
            raise OSError('os.fork is not available')
        pid = os.fork()
        if pid:
            # Parent process: log and hand the child's PID back to the caller.
            self.logger.info('forked child process: ' + str(pid))
            return pid
    # Record the serving thread and reset the run-state flags.
    self.__server_thread = threading.current_thread()
    self.__wakeup_fd = WakeupFd()
    self.__is_shutdown.clear()
    self.__should_stop.clear()
    self.__is_running.set()
    while not self.__should_stop.is_set():
        try:
            self._serve_ready()
        except socket.error:
            self.logger.warning('encountered socket error, stopping server')
            self.__should_stop.set()
    self.__is_shutdown.set()
    self.__is_running.clear()
    return 0
def purge(self):
    """Submit purge request(s) to the CCU API.

    A purge may need several API requests and may be rate-limited, so this
    is a generator yielding ``(url_batch, response)`` for each request,
    letting callers report progress or back off on HTTP 507::

        for url_batch, response in purge_request.purge():
            if response.ok:
                ...  # update progress
            elif response.status_code == 507:
                ...  # rate-limited; back off

    Use ``purge_all()`` if you just want a blocking call. Both raise HTTP
    exceptions for any error response other than rate-limiting.
    """
    purge_url = urljoin('https://%s' % self.host,
                        '/ccu/v3/%s/url/%s' % (self.action, self.network))
    while self.urls:
        # Accumulate URLs until the request body would exceed the size limit.
        batch = []
        batch_size = 0
        while self.urls and batch_size < self.MAX_REQUEST_SIZE:
            candidate = self.urls.pop()
            if not isinstance(candidate, bytes):
                candidate = candidate.encode('utf-8')
            batch.append(candidate)
            batch_size += len(candidate)
        if not batch:
            continue
        payload = {'objects': batch}
        logger.debug('Requesting Akamai purge %d URLs', len(batch))
        response = requests.post(url=purge_url, auth=self.auth,
                                 data=json.dumps(payload),
                                 headers={'Content-Type': 'application/json'})
        if not response.ok:
            # Return the current batch to the queue so it can be retried later.
            self.urls.extend(batch)
            # Raise an exception for errors other than rate-limiting (507).
            if response.status_code != 507:
                response.raise_for_status()
        yield batch, response
def fileish_open(fileish, mode):
    """Coerce a file-ish value into a BytesIO-like object.

    :param fileish: the file-ish object (path str, BytesIO, bytes contents)
    :param str mode: mode for the open function.
    :rtype: BytesIO
    """
    # Decide which capability the caller needs, based on the mode string.
    wants_write = mode is not None and any(flag in mode for flag in '+wax')
    needed_attr = 'write' if wants_write else 'read'
    if hasattr(fileish, needed_attr) and hasattr(fileish, 'seek'):
        # Already a BytesIO-like object; hand it back untouched.
        return fileish
    if isinstance(fileish, str):
        # A file path (or, historically, str contents triggering TypeError).
        try:
            return open(fileish, mode)
        except TypeError:
            return io.BytesIO(fileish)
    # Raw bytes contents.
    return io.BytesIO(fileish)
def activity_has_provenance(self, activity, prov_ids):
    # type: (str, List[Identifier]) -> None
    """Add http://www.w3.org/TR/prov-aq/ relations to nested PROV files."""
    # NOTE: this only works if the corresponding metadata/provenance arcp URI
    # is a pre-registered namespace in the PROV Document.
    attributes = [(PROV["has_provenance"], pid) for pid in prov_ids]
    self.document.activity(activity, other_attributes=attributes)
    # Tip: prov:mentionOf (https://www.w3.org/TR/prov-links/#term-mention)
    # cannot be used here, as it applies to entities only, not activities.
    annotation_uris = [pid.uri for pid in prov_ids]
    self.research_object.add_annotation(activity, annotation_uris,
                                        PROV["has_provenance"].uri)
def account(self, address):
    """Return information and links relating to a single account.

    `GET /accounts/{account}
    <https://www.stellar.org/developers/horizon/reference/endpoints/accounts-single.html>`_

    :param str address: The account ID to retrieve details about.
    :return: The account details in a JSON response.
    :rtype: dict
    """
    return self.query('/accounts/{account_id}'.format(account_id=address))
def resource_request_send(self, request_id, uri_type, uri, transfer_type, storage, force_mavlink1=False):
    '''The autopilot is requesting a resource (file, binary, other type of data)

    request_id    : Request ID. This ID should be re-used when sending back URI contents (uint8_t)
    uri_type      : The type of requested URI. 0 = a file via URL. 1 = a UAVCAN binary (uint8_t)
    uri           : The requested unique resource identifier (URI). It is not necessarily a straight domain name (depends on the URI type enum) (uint8_t)
    transfer_type : The way the autopilot wants to receive the URI. 0 = MAVLink FTP. 1 = binary stream. (uint8_t)
    storage       : The storage path the autopilot wants the URI to be stored in. Will only be valid if the transfer_type has a storage associated (e.g. MAVLink FTP). (uint8_t)
    '''
    # Encode first, then hand the message to the generic send path.
    encoded = self.resource_request_encode(request_id, uri_type, uri,
                                           transfer_type, storage)
    return self.send(encoded, force_mavlink1=force_mavlink1)
def properties(self):
    """All reaction properties as a dict"""
    props = {
        'id': self._id,
        'reversible': self._rev,
        'equation': self._equation,
    }
    # Optional attributes are only included when actually present.
    if 'name' in self._root.attrib:
        props['name'] = self._root.get('name')
    if self._lower_flux is not None:
        props['lower_flux'] = self._lower_flux
    if self._upper_flux is not None:
        props['upper_flux'] = self._upper_flux
    return props
def visitvalues(self, func):
    """Run ``func`` on each descendant object, depth-first in sorted key
    order, skipping ``self`` itself.

    Note: if ``func`` returns ``None`` (or doesn't return), iteration
    continues.  However, if ``func`` returns anything else, iteration
    ceases and that value is returned.
    """
    def _walk(node):
        # Yield the node itself, then recurse into children (if any),
        # visiting keys in sorted order.
        yield node
        for key in sorted(getattr(node, "keys", lambda: [])()):
            for descendant in _walk(node[key]):
                yield descendant

    walker = _walk(self)
    next(walker)  # skip self; only descendants are visited
    for obj in walker:
        outcome = func(obj)
        if outcome is not None:
            return outcome
def create_stream(self, uidList=None):
    '''Create an IM stream with the given users.

    :param uidList: list of user ids to include in the stream
        (defaults to an empty list)
    :return: ``(status_code, response)`` tuple from the REST POST call
    '''
    # Fix: the original used a mutable default argument (uidList=[]),
    # which is shared between calls; use the None sentinel instead.
    if uidList is None:
        uidList = []
    req_hook = 'pod/v1/im/create'
    req_args = json.dumps(uidList)
    status_code, response = self.__rest__.POST_query(req_hook, req_args)
    self.logger.debug('%s: %s' % (status_code, response))
    return status_code, response
def factorset_product(*factorsets_list):
    r"""Base method used for product of factor sets.

    Suppose :math:`\vec\phi_1` and :math:`\vec\phi_2` are two factor sets,
    then their product is another factor set
    :math:`\vec\phi_3 = \vec\phi_1 \cup \vec\phi_2`.

    Parameters
    ----------
    factorsets_list: FactorSet1, FactorSet2, ..., FactorSetn
        All the factor sets to be multiplied

    Returns
    -------
    Product of the factor sets in ``factorsets_list``.

    Raises
    ------
    TypeError
        If any argument is not a FactorSet instance.
    """
    for factorset in factorsets_list:
        if not isinstance(factorset, FactorSet):
            raise TypeError("Input parameters must be FactorSet instances")
    # Fold the (non-inplace) pairwise product over all factor sets.
    return reduce(lambda acc, fs: acc.product(fs, inplace=False),
                  factorsets_list)
def run(self):
    """Execute the request.

    Returns:
      An array of tuples representing the metric evaluations -- each of the
      form (<wall time in secs>, <training step>, <metric value>).
    """
    run_name, tag = metrics.run_tag_from_session_and_metric(
        self._request.session_name, self._request.metric_name)
    body, _ = self._scalars_plugin_instance.scalars_impl(
        tag, run_name, None, scalars_plugin.OutputFormat.JSON)
    return body
def _run_bunny(args):
    """Run CWL with rabix bunny."""
    main_file, json_file, project_name = _get_main_and_json(args.directory)
    work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "bunny_work"))
    flags = ["-b", work_dir]
    log_file = os.path.join(work_dir, "%s-bunny.log" % project_name)
    if os.path.exists(work_dir):
        # Re-use the most recently modified cache directory, if any exists.
        caches = [os.path.join(work_dir, entry)
                  for entry in os.listdir(work_dir)
                  if os.path.isdir(os.path.join(work_dir, entry))]
        if caches:
            flags += ["--cache-dir", max(caches, key=os.path.getmtime)]
    if args.no_container:
        _remove_bcbiovm_path()
        flags += ["--no-container"]
    cmd = ["rabix"] + flags + [main_file, json_file]
    with utils.chdir(work_dir):
        _run_tool(cmd, not args.no_container, work_dir, log_file)
def stop(self):
    """Stops listening for events."""
    if not self._is_running:
        # Already stopped; nothing to do.
        return
    pushcenter_logger.debug("[NURESTPushCenter] Stopping...")
    self._thread.stop()
    self._thread.join()
    # Reset all connection/run state.
    self._is_running = False
    self._current_connection = None
    self._start_time = None
    self._timeout = None
def vhatg(v1, ndim):
    """Find the unit vector along a double precision vector of arbitrary
    dimension.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vhatg_c.html

    :param v1: Vector to be normalized.
    :type v1: list[ndim]
    :param ndim: Dimension of v1 (and also vout).
    :type ndim: int
    :return: Unit vector v / abs(v).
    :rtype: list[ndim]
    """
    vin = stypes.toDoubleVector(v1)
    vout = stypes.emptyDoubleVector(ndim)
    # The C routine writes the normalized vector into vout in place.
    libspice.vhatg_c(vin, ctypes.c_int(ndim), vout)
    return stypes.cVectorToPython(vout)
def getdef(self, defname, tag='*'):
    """Return definition element with name *defname*.

    :param defname: definition name; an optional ``a:`` prefix is stripped
    :param tag: XML tag to match (default: any tag)
    :raises KeyError: when no matching definition exists in any XSD tree
    """
    if defname.startswith('a:'):
        defname = defname[2:]
    query = "./%s[@name='%s']" % (tag, defname)
    for tree in self.__xsd_trees:
        matches = tree.xpath(query)
        if matches:
            return matches[0]
    raise KeyError("no definition named '%s' found" % defname)
def list_boards():
    """! @brief Generate dictionary with info about supported boards.

    Output version history:
    - 1.0, initial version
    """
    board_entries = [
        {
            'id': board_id,
            'name': info.name,
            'target': info.target,
            'binary': info.binary,
        }
        for board_id, info in BOARD_ID_TO_INFO.items()
    ]
    return {
        'pyocd_version': __version__,
        'version': {'major': 1, 'minor': 0},
        'status': 0,
        'boards': board_entries,
    }
def add_backbone_atoms_linearly(self, start_residue, end_residue, insertion_residues, insertion_residue_map):
    '''Add missing backbone atoms (N, CA, C) for insertion_residues, spaced
    evenly in a straight line between the last existing backbone atom of
    start_residue and the first existing backbone atom of end_residue.
    O-atoms are not currently added.  This is useful for computational
    methods which do not require the atoms to be in the correct coordinates
    but expect N, CA, and C backbone atoms to exist for all residues.

    start_residue and end_residue are Residue objects.  insertion_residues
    is a list of PDB residue IDs (columns 22-27 of ATOM lines in the PDB
    format), presumed to be in sequence (N->C) order.  insertion_residue_map
    maps PDB residue IDs to 1-letter amino acid codes; its keys must equal
    insertion_residues.  start_residue and end_residue must exist in
    insertion_residues and the PDB file, and must be on the same chain.

    Existing N/CA/C atoms of the two terminal residues are retained when
    their connection towards the rest of the chain is intact, e.g. the CA of
    the first residue is kept only when its N is present.  All other atoms of
    the insertion residues are discarded (with matching ANISOU records).

    Note: this function currently only supports canonical amino acids.
    '''
    assert(sorted(insertion_residues) == sorted(insertion_residue_map.keys()))
    assert(start_residue.chain + start_residue.residue_id in insertion_residues)
    assert(end_residue.chain + end_residue.residue_id in insertion_residues)
    assert(start_residue.chain == end_residue.chain)

    discarded_atoms = []

    # --- Trim the segment's N-terminal residue ---
    # Keep N (required); keep C only if a CA is present to bridge it to N.
    start_res_atoms_ids = self.get_atom_serial_numbers_from_pdb_residue_ids([insertion_residues[0]])
    start_res_atoms = [self.atoms[id] for id in start_res_atoms_ids]
    start_res_atom_types = [a.name for a in start_res_atoms]
    start_atoms = [None, None, None]  # slots: N, CA, C
    for a in start_res_atoms:
        if a.name == 'N':
            start_atoms[0] = a
        elif a.name == 'CA':
            start_atoms[1] = a
        elif a.name == 'C':
            start_atoms[2] = a
        else:
            discarded_atoms.append(a.serial_number)
    if 'C' in start_res_atom_types and 'CA' not in start_res_atom_types:
        # Bug fix: was "discarded_atoms += start_atoms[2].serial_number",
        # which raises TypeError (list += int); append the serial number.
        discarded_atoms.append(start_atoms[2].serial_number)
        start_atoms[2] = None
    if not start_atoms[0]:
        raise Exception('The N atom for the start residue must exist.')
    start_atoms = [a for a in start_atoms if a]
    start_atom = start_atoms[-1]

    # --- Trim the segment's C-terminal residue ---
    # Keep C (required); keep N only if a CA is present to bridge it to C.
    end_res_atoms_ids = self.get_atom_serial_numbers_from_pdb_residue_ids([insertion_residues[-1]])
    end_res_atoms = [self.atoms[id] for id in end_res_atoms_ids]
    end_res_atom_types = [a.name for a in end_res_atoms]
    end_atoms = [None, None, None]  # slots: N, CA, C
    for a in end_res_atoms:
        if a.name == 'N':
            end_atoms[0] = a
        elif a.name == 'CA':
            end_atoms[1] = a
        elif a.name == 'C':
            end_atoms[2] = a
        else:
            discarded_atoms.append(a.serial_number)
    if 'N' in end_res_atom_types and 'CA' not in end_res_atom_types:
        # Bug fix: same list += int TypeError as above; use append.
        discarded_atoms.append(end_atoms[0].serial_number)
        end_atoms[0] = None
    if not end_atoms[-1]:
        raise Exception('The C atom for the end residue must exist.')
    end_atoms = [a for a in end_atoms if a]
    end_atom = end_atoms[0]

    # Remove all atoms from the remainder of the segment.
    discarded_atoms += self.get_atom_serial_numbers_from_pdb_residue_ids(insertion_residues[1:-1])

    # Remove the atoms from the PDB and re-initialize from the pruned content.
    bonsai_pdb_content, cutting_pdb_content, PSE_file, PSE_script = self.prune(set(discarded_atoms), generate_pymol_session=False)
    self.__init__(bonsai_pdb_content, buffer=self.buffer, bin_size=self.bin_size, safe_mode=self.safe_mode)

    # Build the list of backbone atoms to add for the insertion residues,
    # excluding those already present in the start and end residues.
    new_atoms = []
    assert(len(start_atoms) >= 1)
    # N is guaranteed to exist for the start residue.
    if len(start_atoms) == 2:
        # Only N and CA survived: add the missing C atom.
        residue_id = insertion_residues[0]
        residue_type = insertion_residue_map[residue_id]
        assert(residue_type != 'X' and residue_type in residue_type_1to3_map)
        new_atoms.append((residue_id, residue_type_1to3_map[residue_type], 'C'))
    for insertion_residue in insertion_residues[1:-1]:
        # Interior residues get a full N, CA, C backbone.
        residue_type = insertion_residue_map[insertion_residue]
        assert(residue_type != 'X' and residue_type in residue_type_1to3_map)
        residue_type = residue_type_1to3_map[residue_type]
        new_atoms.append((insertion_residue, residue_type, 'N'))
        new_atoms.append((insertion_residue, residue_type, 'CA'))
        new_atoms.append((insertion_residue, residue_type, 'C'))
    assert(len(end_atoms) >= 1)
    # C is guaranteed to exist for the end residue.
    if len(end_atoms) == 2:
        # Only CA and C survived: add the missing N atom.
        residue_id = insertion_residues[-1]
        residue_type = insertion_residue_map[residue_id]
        assert(residue_type != 'X' and residue_type in residue_type_1to3_map)
        new_atoms.append((residue_id, residue_type_1to3_map[residue_type], 'N'))
    return self.add_atoms_linearly(start_atom, end_atom, new_atoms)
def first(self, timeout=None):
    """Block until the first successful result becomes available.

    :param timeout: per-wait timeout in seconds (None waits indefinitely)
    :type timeout: float | int | None
    :return: the first collected result, or None if all threads have failed
    :rtype: *
    """
    done = False
    while not done:
        with self._jobfinished:
            # A result arrived, or every job has finished (possibly all failed).
            done = bool(self._results) or not self._jobs.unfinished_tasks
            if not done:
                self._jobfinished.wait(timeout)
    if self._results:
        return self._results[0]
    return None
def view_similarity_matrix(self, data=None, labels=None, figsize=None, filename=None):
    """Plot a similarity heatmap derived from the activation map.

    :param data: Optional data points to calculate the similarity with;
        falls back to ``self.activation_map`` when omitted.
    :type data: numpy.array
    :param figsize: Optional parameter to specify the size of the figure.
    :type figsize: (int, int)
    :param labels: Optional parameter to specify the label of each point.
    :type labels: list of str.
    :param filename: If specified, the plot will not be shown but saved to
        this file.
    :type filename: str.
    """
    if not have_heatmap:
        raise Exception("Import dependencies missing for viewing "
                        "similarity matrix. You must have seaborn and "
                        "scikit-learn")
    if data is None and self.activation_map is None:
        self.get_surface_state()
    points = self.activation_map if data is None else data
    # Pairwise correlation serves as the similarity metric.
    similarity = 1 - pairwise_distances(points, metric="correlation")
    fig, axis = plt.subplots(figsize=(12, 9) if figsize is None else figsize)
    # Seaborn inverts y-axis labels by default, so both axes share one list.
    ticks = [] if labels is None else labels
    sns.heatmap(similarity, vmax=1, vmin=-1, square=True,
                xticklabels=ticks, yticklabels=ticks,
                cmap="RdBu_r", center=0)
    fig.tight_layout()
    # Rotate the ticks to a readable angle.
    plt.yticks(rotation=0)
    plt.xticks(rotation=90)
    axis.set_yticklabels(ticks, ha='right', va='center', size=8)
    axis.set_xticklabels(ticks, ha='center', va='top', size=8)
    # Save or show the figure.
    if filename is not None:
        plt.savefig(filename, bbox_inches='tight')
    else:
        plt.show()
    return plt
def interpolate_1d(x, xp, *args, **kwargs):
    r"""Interpolates data with any shape over a specified axis.

    Interpolation over a specified axis for arrays of any shape.

    Parameters
    ----------
    x : array-like
        1-D array of desired interpolated values.
    xp : array-like
        The x-coordinates of the data points.
    args : array-like
        The data to be interpolated. Can be multiple arguments, all must be the same shape as
        xp.
    axis : int, optional
        The axis to interpolate over. Defaults to 0.
    fill_value : float, optional
        Specify handling of interpolation points out of data bounds. If None, will raise
        ValueError if points are out of bounds. Defaults to nan.

    Returns
    -------
    array-like
        Interpolated values for each point with coordinates sorted in ascending order.

    Examples
    --------
    >>> x = np.array([1., 2., 3., 4.])
    >>> y = np.array([1., 2., 3., 4.])
    >>> x_interp = np.array([2.5, 3.5])
    >>> metpy.calc.interp(x_interp, x, y)
    array([ 2.5,  3.5])

    Notes
    -----
    xp and args must be the same shape.
    """
    # Pull out keyword args (kept **kwargs-based for backward compatibility)
    fill_value = kwargs.pop('fill_value', np.nan)
    axis = kwargs.pop('axis', 0)
    # Make x a flat 1-D array of interpolation points
    x = np.asanyarray(x).reshape(-1)
    # Save number of dimensions in xp
    ndim = xp.ndim
    # Sort input data along the interpolation axis
    sort_args = np.argsort(xp, axis=axis)
    sort_x = np.argsort(x)
    # indices for sorting
    sorter = broadcast_indices(xp, sort_args, ndim, axis)
    # sort xp so coordinates increase along `axis`
    xp = xp[sorter]
    # Apply the same ordering to every data array so values stay aligned
    variables = [arr[sorter] for arr in args]
    # Make x broadcast with xp: insert new axes everywhere except `axis`
    x_array = x[sort_x]
    expand = [np.newaxis] * ndim
    expand[axis] = slice(None)
    x_array = x_array[tuple(expand)]
    # Index of the first xp value >= each interpolation point ("above")
    minv = np.apply_along_axis(np.searchsorted, axis, xp, x[sort_x])
    minv2 = np.copy(minv)
    # If fill_value is None and data is out of bounds, raise value error
    if ((np.max(minv) == xp.shape[axis]) or (np.min(minv) == 0)) and fill_value is None:
        raise ValueError('Interpolation point out of data bounds encountered')
    # Warn if interpolated values are outside data bounds; clamp the index so
    # the arithmetic below stays valid (these points are overwritten later).
    if np.max(minv) == xp.shape[axis]:
        warnings.warn('Interpolation point out of data bounds encountered')
        minv2[minv == xp.shape[axis]] = xp.shape[axis] - 1
    if np.min(minv) == 0:
        minv2[minv == 0] = 1
    # Get indices for broadcasting arrays: bracketing points for each x
    above = broadcast_indices(xp, minv2, ndim, axis)
    below = broadcast_indices(xp, minv2 - 1, ndim, axis)
    if np.any(x_array < xp[below]):
        warnings.warn('Interpolation point out of data bounds encountered')
    # Create empty output list
    ret = []
    # Calculate linear interpolation for each variable
    for var in variables:
        # Var needs to be on the *left* of the multiply to ensure that if it's a pint
        # Quantity, it gets to control the operation -- at least until we make sure
        # masked arrays and pint play together better. See https://github.com/hgrecco/pint#633
        var_interp = var[below] + (var[above] - var[below]) * ((x_array - xp[below]) / (xp[above] - xp[below]))
        # Set points out of bounds to fill value.
        var_interp[minv == xp.shape[axis]] = fill_value
        var_interp[x_array < xp[below]] = fill_value
        # Check for input points in decreasing order and flip output to match.
        if x[0] > x[-1]:
            var_interp = np.swapaxes(np.swapaxes(var_interp, 0, axis)[::-1], 0, axis)
        # Output to list
        ret.append(var_interp)
    # Unwrap a single-variable result for convenience
    if len(ret) == 1:
        return ret[0]
    else:
        return ret
def setColumn(self, column):
    """Sets the column instance for this edit to the given column.

    :param column: <orb.Column>
    """
    if not column:
        return
    self._columnName = column.name()
    # Only foreign-key columns need the editor to be configured.
    if column.columnType() != ColumnType.ForeignKey:
        return
    editor = self._editor
    if editor:
        editor.setTableType(column.referenceModel())
        editor.setRequired(column.required())
def set_bg(self, bg, key="data", attrs=None):
    """Set the background data.

    Parameters
    ----------
    bg : numbers.Real, 2d ndarray, ImageData, or h5py.Dataset
        The background data. If `bg` is an `h5py.Dataset` object,
        it must exist in the same hdf5 file (a hard link is created).
        If set to `None`, the data will be removed.
    key : str
        One of :const:`VALID_BG_KEYS`.
    attrs : dict, optional
        Attributes to attach to the stored background dataset.

    See Also
    --------
    del_bg : removing background data
    """
    # BUGFIX: `attrs` previously defaulted to a mutable `{}`; use a None
    # sentinel to avoid the shared-mutable-default pitfall.
    if attrs is None:
        attrs = {}
    if key not in VALID_BG_KEYS:
        raise ValueError("Invalid bg key: {}".format(key))
    # Remove any previously stored background under this key.
    if key in self.h5["bg_data"]:
        del self.h5["bg_data"][key]
    # Store the new background (None means "just remove").
    if isinstance(bg, (numbers.Real, np.ndarray)):
        dset = write_image_dataset(group=self.h5["bg_data"],
                                   key=key,
                                   data=bg,
                                   h5dtype=self.h5dtype)
        for kw in attrs:
            dset.attrs[kw] = attrs[kw]
    elif isinstance(bg, h5py.Dataset):
        # Create a hard link
        # (This functionality was intended for saving memory when storing
        # large QPSeries with universal background data, i.e. when using
        # `QPSeries.add_qpimage` with the `bg_from_idx` keyword.)
        self.h5["bg_data"][key] = bg
    elif bg is not None:
        msg = "Unknown background data type: {}".format(bg)
        raise ValueError(msg)
def inn(self) -> str:
    """Generate a random but checksum-valid ``INN``.

    :return: INN as a 12-digit string.
    """
    weights = {
        'n2': [7, 2, 4, 10, 3, 5, 9, 4, 6, 8],
        'n1': [3, 7, 2, 4, 10, 3, 5, 9, 4, 6, 8],
    }

    def control_sum(nums: list, t: str) -> int:
        # Weighted digit sum reduced modulo 11, then modulo 10.
        total = sum(d * w for d, w in zip(nums, weights[t]))
        return total % 11 % 10

    # First digit must be non-zero; the next nine may be anything.
    digits = [self.random.randint(1, 9)]
    for _ in range(9):
        digits.append(self.random.randint(0, 9))
    # Two trailing control digits, the second computed over the first.
    digits.append(control_sum(digits, 'n2'))
    digits.append(control_sum(digits, 'n1'))
    return ''.join(str(d) for d in digits)
async def _async_wait_for_process(future_process: Any, out: Optional[Union[TeeCapture, IO[str]]] = sys.stdout, err: Optional[Union[TeeCapture, IO[str]]] = sys.stderr) -> CommandOutput:
    """Awaits the creation and completion of an asynchronous process.

    Args:
        future_process: The eventually created process.
        out: Where to write stuff emitted by the process' stdout.
        err: Where to write stuff emitted by the process' stderr.

    Returns:
        A (captured output, captured error output, return code) triplet.
    """
    process = await future_process
    # Forward both pipes concurrently so neither one can block the other.
    output, err_output = await asyncio.gather(
        _async_forward(process.stdout, out),
        _async_forward(process.stderr, err),
    )
    await process.wait()
    return CommandOutput(output, err_output, process.returncode)
def find_all(self, model_class, params={}):
    """Return a list of models from the API, caching each fresh result.

    Args:
        model_class (:class:`cinder_data.model.CinderModel`): A subclass of
            :class:`cinder_data.model.CinderModel` of your chosen model.
        params (dict, optional): Query parameters appended to the URL.

    Returns:
        list: A list of instances of your model_class, or an empty list.
    """
    url = '{host}/{namespace}/{model}{params}'.format(
        host=self._host,
        namespace=self._namespace,
        model=self._translate_name(model_class.__name__),
        params=self._build_param_string(params))
    records = []
    for item in self._get_json(url)['data']:
        record = model_class(item['attributes'])
        record.id = item['id']
        record.validate()
        records.append(record)
        # Cache each validated record as soon as it is built.
        if self._cache is not None:
            self._cache.set_record(model_class.__name__, record.id, record)
    return records
def make_dist_mat(xy1, xy2, longlat=True):
    """Return a distance matrix between two sets of coordinates.

    Uses haversine distance when ``longlat`` is True (the default) and
    plain Euclidean distance otherwise.

    Parameters
    ----------
    xy1 : numpy.array
        The first set of coordinates as [(x, y), (x, y), (x, y)].
    xy2 : numpy.array
        The second set of coordinates as [(x, y), (x, y), (x, y)].
    longlat : boolean, optional
        Whether the coordinates are in geographic (longitude/latitude)
        format or not (default: True).

    Returns
    -------
    mat_dist : numpy.array
        The distance matrix between xy1 and xy2.
    """
    if not longlat:
        # Euclidean: pairwise component differences, then hypotenuse.
        dx = np.subtract.outer(xy1[:, 0], xy2[:, 0])
        dy = np.subtract.outer(xy1[:, 1], xy2[:, 1])
        return np.hypot(dx, dy)
    return hav_dist(xy1[:, None], xy2)
def part(x, *args):
    '''part(x, ii, jj...) is equivalent to x[ii, jj...] if x is a sparse matrix or numpy array and is
    equivalent to np.asarray(x)[ii][:, jj][...] if x is not. If only one argument is passed and
    it is a tuple, then it is passed like x[ii] alone.
    The part function is compatible with slices (though they must be entered using the slice(...)
    rather than the : syntax) and Ellipsis.
    '''
    n = len(args)
    sl = slice(None)  # shorthand for the full-slice ':'
    if sps.issparse(x):
        # Sparse matrices are strictly 2-D, so at most two indices make sense.
        if n == 1:
            return x[args[0]]
        elif n > 2:
            raise ValueError('Too many indices for sparse matrix')
        (ii, jj) = args
        # NOTE(review): because of the elif, only `ii` is normalized when BOTH
        # indices are Ellipsis -- presumably that case is not expected here;
        # confirm before relying on it.
        if ii is Ellipsis:
            ii = sl
        elif jj is Ellipsis:
            jj = sl
        ni = pimms.is_number(ii)
        nj = pimms.is_number(jj)
        if ni and nj:
            # scalar row and column: single element
            return x[ii, jj]
        elif ni:
            # single row: densify and drop the leading dimension
            return x[ii, jj].toarray()[0]
        elif nj:
            # single column: densify and drop the trailing dimension
            return x[ii, jj].toarray()[:, 0]
        else:
            # general case: chain the two index operations
            return x[ii][:, jj]
    else:
        x = np.asarray(x)
        if n == 1:
            return x[args[0]]
        # i0 accumulates a full-slice prefix for the axes already consumed,
        # so each subsequent index applies to the correct remaining axis.
        i0 = []
        for (k, arg) in enumerate(args):
            if arg is Ellipsis:  # special case...
                # if Ellipsis in args[ii+1:]: raise ValueError('only one ellipsis allowed per part')
                # Pad the prefix so the remaining args land on the last axes.
                left = n - k - 1
                i0 = [sl for _ in range(len(x.shape) - left)]
            else:
                x = x[tuple(i0 + [arg])]
                # Non-scalar indices keep their axis, so skip past it next time;
                # scalar indices collapse the axis and need no placeholder.
                if not pimms.is_number(arg):
                    i0.append(sl)
        return x
def __get_diff_recent():
    '''Generate an HTML diff report of recently edited posts.'''
    diff_str = ''
    for key in router_post:
        # Posts of this kind edited within the last TIME_LIMIT seconds.
        recent_posts = MPost.query_recent_edited(tools.timestamp() - TIME_LIMIT, kind=key)
        for recent_post in recent_posts:
            hist_rec = MPostHist.get_last(recent_post.uid)
            if hist_rec:
                # Diff the title first, then the markdown content.
                raw_title = hist_rec.title
                new_title = recent_post.title
                infobox = diff_table(raw_title, new_title)
                diff_str = diff_str + '''
<h2 style="color:red;font-size:larger;font-weight:70;">TITLE: {0}</h2>
'''.format(recent_post.title) + infobox
                infobox = diff_table(hist_rec.cnt_md, recent_post.cnt_md)
                diff_str = diff_str + '<h3>CONTENT:{0}</h3>'.format(recent_post.title) + infobox + '</hr>'
            else:
                # No history record: nothing to diff against.
                continue
    return diff_str
def get_worksheet(gc, gfile_id, wks_name, write_access=False, new_sheet_dimensions=(1000, 100)):
    """Return a worksheet from a Google spreadsheet, optionally creating it.

    Args:
        gc: authorized gspread client.
        gfile_id (str): key of the spreadsheet to open.
        wks_name (str or None): worksheet name; the first worksheet is
            returned when None.
        write_access (bool): when the named worksheet does not exist,
            create it (only if True); otherwise None is returned.
        new_sheet_dimensions (tuple): (rows, cols) for a newly created sheet.

    Returns:
        The worksheet, or None if it does not exist and write_access is False.
    """
    spsh = gc.open_by_key(gfile_id)
    # If worksheet name is not provided, take the first worksheet.
    if wks_name is None:
        return spsh.sheet1
    try:
        return spsh.worksheet(wks_name)
    except Exception:
        # BUGFIX: was a bare `except:` (also caught KeyboardInterrupt etc.).
        # gspread raises WorksheetNotFound here; create the sheet only when
        # the caller asked for write access.
        if write_access:
            return spsh.add_worksheet(wks_name, *new_sheet_dimensions)
        return None
def mdev(data, rate=1.0, data_type="phase", taus=None):
    """Modified Allan deviation.
    Used to distinguish between White and Flicker Phase Modulation.

    .. math::

        \\sigma^2_{MDEV}(m\\tau_0) = {1 \\over 2 (m \\tau_0)^2 (N-3m+1)}
        \\sum_{j=1}^{N-3m+1} \\lbrace
        \\sum_{i=j}^{j+m-1} {x}_{i+2m} - 2x_{i+m} + x_{i} \\rbrace^2

    Parameters
    ----------
    data: np.array
        Input data. Provide either phase or frequency (fractional,
        adimensional).
    rate: float
        The sampling rate for data, in Hz. Defaults to 1.0
    data_type: {'phase', 'freq'}
        Data type, i.e. phase or frequency. Defaults to "phase".
    taus: np.array
        Array of tau values, in seconds, for which to compute statistic.
        Optionally set taus=["all"|"octave"|"decade"] for automatic
        tau-list generation.

    Returns
    -------
    (taus2, md, mde, ns): tuple
        taus2: np.array
            Tau values for which td computed
        md: np.array
            Computed mdev for each tau value
        mde: np.array
            mdev errors
        ns: np.array
            Values of N used in each mdev calculation

    Notes
    -----
    see http://www.leapsecond.com/tools/adev_lib.c
    NIST SP 1065 eqn (14) and (15), page 17
    """
    phase = input_to_phase(data, rate, data_type)
    (phase, ms, taus_used) = tau_generator(phase, rate, taus=taus)
    # BUGFIX/cleanup: a dead `data, taus = np.array(phase), np.array(taus)`
    # rebinding was removed -- neither name was used afterwards.
    md = np.zeros_like(ms)
    mderr = np.zeros_like(ms)
    ns = np.zeros_like(ms)
    # this is a 'loop-unrolled' algorithm following
    # http://www.leapsecond.com/tools/adev_lib.c
    for idx, m in enumerate(ms):
        # Cast to int: without this numpy warns about (and will eventually
        # reject) non-integer indices.
        m = int(m)
        tau = taus_used[idx]
        # First loop sum: initial second difference over the first window
        d0 = phase[0:m]
        d1 = phase[m:2 * m]
        d2 = phase[2 * m:3 * m]
        e = min(len(d0), len(d1), len(d2))
        v = np.sum(d2[:e] - 2 * d1[:e] + d0[:e])
        s = v * v
        # Second part of sum: slide the window via cumulative updates
        d3 = phase[3 * m:]
        d2 = phase[2 * m:]
        d1 = phase[1 * m:]
        d0 = phase[0:]
        e = min(len(d0), len(d1), len(d2), len(d3))
        n = e + 1
        v_arr = v + np.cumsum(d3[:e] - 3 * d2[:e] + 3 * d1[:e] - d0[:e])
        s = s + np.sum(v_arr * v_arr)
        s /= 2.0 * m * m * tau * tau * n
        s = np.sqrt(s)
        md[idx] = s
        # Error estimate scales with 1/sqrt(n)
        mderr[idx] = (s / np.sqrt(n))
        ns[idx] = n
    return remove_small_ns(taus_used, md, mderr, ns)
def set_line_width(self, width):
    "Set line width"
    self.line_width = width
    # The PDF 'w' operator can only be emitted once a page exists.
    if self.page <= 0:
        return
    self._out(sprintf('%.2f w', width * self.k))
def get_level_fmt(self, level):
    """Get format for log level."""
    # Map each standard level to its overwrite key; unknown levels map
    # to None, which falls through to the default format.
    level_keys = {
        logging.DEBUG: 'debug',
        logging.INFO: 'info',
        logging.WARNING: 'warning',
        logging.ERROR: 'error',
        logging.CRITICAL: 'critical',
    }
    return self.overwrites.get(level_keys.get(level), self.fmt)
def run(self, context):
    """Runs the satosa proxy with the given context.

    :type context: satosa.context.Context
    :rtype: satosa.response.Response
    :param context: The request context
    :return: response
    """
    try:
        self._load_state(context)
        # Route the request to the endpoint that should handle it.
        spec = self.module_router.endpoint_routing(context)
        resp = self._run_bound_endpoint(context, spec)
        self._save_state(resp, context)
    except SATOSANoBoundEndpointError:
        # No endpoint matched; propagate so the caller can map it to a 404.
        raise
    except SATOSAError:
        # Known SATOSA error: log with traceback and propagate unchanged.
        satosa_logging(logger, logging.ERROR, "Uncaught SATOSA error ", context.state, exc_info=True)
        raise
    except UnknownSystemEntity as err:
        # Misconfiguration (unknown SAML entity): log without traceback.
        satosa_logging(logger, logging.ERROR, "configuration error: unknown system entity " + str(err), context.state, exc_info=False)
        raise
    except Exception as err:
        # Anything else is unexpected: wrap it so callers see a SATOSA error.
        satosa_logging(logger, logging.ERROR, "Uncaught exception", context.state, exc_info=True)
        raise SATOSAUnknownError("Unknown error") from err
    return resp
def private_key(self, s):
    """Parse text as some kind of private key.

    Return a subclass of :class:`Key <pycoin.key.Key>`, or None.
    """
    text = parseable_str(s)
    # Try each recognized encoding in turn; the first hit wins.
    for parser in (self.wif, self.secret_exponent):
        key = parser(text)
        if key:
            return key
    return None
def apply_defaults(self, flags):
    """Applies the defaults for the configured guest OS type. This is
    primarily for getting sane settings straight after creating a
    new VM, but it can also be applied later.

    This is primarily a shortcut, centralizing the tedious job of
    getting the recommended settings and translating them into
    settings updates. The settings are made at the end of the call,
    but not saved.

    in flags of type str
        Additional flags, to be defined later.

    raises :class:`OleErrorNotimpl`
        This method is not implemented yet.
    """
    # NOTE(review): `basestring` is a Python 2 name -- presumably aliased
    # elsewhere in this module for Python 3; confirm before upgrading.
    if not isinstance(flags, basestring):
        raise TypeError("flags can only be an instance of type basestring")
    self._call("applyDefaults", in_p=[flags])
def _recursive_import(package):
    """Import all modules from a package recursively.

    Args:
        package (py:term:`package`): Package to walk
    """
    prefix = package.__name__ + '.'
    path = getattr(package, '__path__', None)
    if not path:
        # Plain module (no __path__): nothing to walk.
        return
    for finder, name, _is_pkg in pkgutil.walk_packages(path, prefix=prefix):
        _import_module(name, finder.path)
def update_object_in_db(self, obj: Any, table: str, fieldlist: Sequence[str]) -> None:
    """Updates an object in the database (saves it to the database, where
    it exists there already)."""
    self.ensure_db_open()
    # The generated SQL is UPDATE ... SET <non-PK fields> WHERE <first field>,
    # so bind the non-PK values first and the PK (first field) value last.
    values = [getattr(obj, field) for field in fieldlist[1:]]
    values.append(getattr(obj, fieldlist[0]))
    cursor = self.db.cursor()
    self.db_exec_with_cursor(
        cursor,
        get_sql_update_by_first_field(table, fieldlist, self.get_delims()),
        *values)
def build_or_install_bokehjs():
    '''Build a new BokehJS (and install it) or install a previously built
    BokehJS.

    If no options ``--build-js`` or ``--install-js`` are detected, the
    user is prompted for what to do.

    If ``--existing-js`` is detected, then this setup.py is being run from a
    packaged sdist, and no action is taken.

    Note that ``--build-js`` is only compatible with the following ``setup.py``
    commands: install, develop, sdist, egg_info, build

    Returns:
        str : one of 'built', 'installed', 'packaged'
            How (or if) BokehJS was installed into the python source tree
    '''
    # This happens when building from inside a published, pre-packaged sdist
    # The --existing-js option is not otherwise documented
    if '--existing-js' in sys.argv:
        sys.argv.remove('--existing-js')
        return "packaged"
    if '--build-js' not in sys.argv and '--install-js' not in sys.argv:
        jsbuild = jsbuild_prompt()
    elif '--build-js' in sys.argv:
        jsbuild = True
        sys.argv.remove('--build-js')
    else:
        # must be "--install-js"
        jsbuild = False
        sys.argv.remove('--install-js')
    jsbuild_ok = ('install', 'develop', 'sdist', 'egg_info', 'build')
    if jsbuild and not any(arg in sys.argv for arg in jsbuild_ok):
        # BUGFIX: the message previously omitted 'egg_info', which IS a
        # valid command per jsbuild_ok above.
        print("Error: Option '--build-js' only valid with 'install', 'develop', 'sdist', 'egg_info', or 'build', exiting.")
        sys.exit(1)
    if jsbuild:
        build_js()
        install_js()
        return "built"
    install_js()
    return "installed"
def check_expected_type(model, expected_type):
    """Check if a model is of the right type. Raise error if not.

    Parameters
    ----------
    model : model
        Any scikit-learn model
    expected_type : Type
        Expected type of the scikit-learn.
    """
    actual_name = model.__class__.__name__
    expected_name = expected_type.__name__
    if actual_name == expected_name:
        return
    raise TypeError("Expected model of type '%s' (got %s)"
                    % (expected_name, actual_name))
def verify_package(verbose=True):
    """Perform a series of checks on the current directory to verify that
    it's a valid Dallinger experiment."""
    checks = (
        verify_directory,
        verify_experiment_module,
        verify_config,
        verify_no_conflicts,
    )
    # Run every check (no short-circuiting) so all problems get reported.
    results = [check(verbose) for check in checks]
    return all(results)
def user_parse(data):
    """Parse information from provider."""
    # (output key, provider key) pairs, yielded in this exact order.
    field_map = (
        ('id', 'id'),
        ('email', 'email'),
        ('first_name', 'given_name'),
        ('last_name', 'family_name'),
        ('link', 'link'),
        ('locale', 'locale'),
        ('picture', 'picture'),
        ('gender', 'gender'),
    )
    for out_key, src_key in field_map:
        yield out_key, data.get(src_key)
def send(self, payload, *args, **kwargs):
    """Alias for WebSocketServerProtocol `sendMessage` method"""
    # Structured payloads are serialized to JSON before transmission.
    body = json.dumps(payload) if isinstance(payload, (list, dict)) else payload
    self.sendMessage(body.encode(), *args, **kwargs)
def decodeRPCErrorMsg(e):
    """Helper function to decode the raised Exception and give it a
    python Exception class"""
    pattern = re.compile(
        ("(10 assert_exception: Assert Exception\n|"
         "3030000 tx_missing_posting_auth)"
         ".*: (.*)\n"),
        flags=re.M,
    )
    match = pattern.search(str(e))
    if match is None:
        # Not a recognized RPC error shape: fall back to the raw message.
        return str(e)
    return match.group(2).strip()
def ping_external_urls_handler(sender, **kwargs):
    """Ping externals URLS when an entry is saved."""
    entry = kwargs['instance']
    # Only ping for visible entries, and only when the feature is enabled.
    if entry.is_visible and settings.SAVE_PING_EXTERNAL_URLS:
        ExternalUrlsPinger(entry)
def dbreference_types(self):
    """Distinct database reference types (``type_``) in :class:`.models.DbReference`

    :return: List of strings for all available database cross reference types used in model DbReference
    :rtype: list[str]
    """
    query = self.session.query(distinct(models.DbReference.type_))
    # Each row is a 1-tuple; unwrap to plain strings.
    return [row[0] for row in query.all()]
def _handle_return(self, node, scope, ctxt, stream):
    """Handle Return nodes by raising InterpReturn with the evaluated value.

    :node: the Return AST node
    :scope: current scope
    :ctxt: current context
    :stream: current stream
    :returns: never returns normally; raises errors.InterpReturn
    """
    self._dlog("handling return")
    ret_val = None if node.expr is None else self._handle_node(node.expr, scope, ctxt, stream)
    self._dlog("return value = {}".format(ret_val))
    # Unwind back to the function caller via exception-based control flow.
    raise errors.InterpReturn(ret_val)
def to_bigquery_fields(self, name_case=DdlParseBase.NAME_CASE.original):
    """Generate BigQuery JSON fields define

    :param name_case: name case type
        * DdlParse.NAME_CASE.original : Return to no convert
        * DdlParse.NAME_CASE.lower : Return to lower
        * DdlParse.NAME_CASE.upper : Return to upper
    :return: BigQuery JSON fields define
    """
    # Delegate to the column collection, which owns the serialization.
    columns = self._columns
    return columns.to_bigquery_fields(name_case)
def get_items(self):
    """This is out of spec, but required for adaptive assessment parts?"""
    lookup = get_item_lookup_session(runtime=self._runtime, proxy=self._proxy)
    lookup.use_federated_bank_view()
    found = []
    if self.has_items():
        # Resolve every stored item id through the lookup session.
        found = [lookup.get_item(Id(idstr)) for idstr in self._my_map['itemIds']]
    return ItemList(found, runtime=self._runtime, proxy=self._proxy)
def _parse_file(self):
    """Preprocess and parse C file into an AST"""
    # The arch include paths pull in the right register definitions.
    args = utilities.build_includes(self.arch.includes())
    # args.append('-mcpu=%s' % self.arch.property('chip'))
    # Only preprocess (-E) and neutralize gcc extensions that are not ISO C.
    for flag in ('-E', '-D__attribute__(x)=', '-D__extension__='):
        args.append(flag)
    self.ast = parse_file(self.filepath, use_cpp=True,
                          cpp_path='arm-none-eabi-gcc', cpp_args=args)
def removeComponent(self, row, col):
    """Removes the component at the given location

    :param row: track location of existing component to remove
    :type row: int
    :param col: location in track of existing component to remove
    :type col: int
    """
    self._segments[row].pop(col)
    # If this row is now empty we should remove it?
    # NOTE(review): this inspects the LAST row (-1), not `row` -- presumably
    # intended to trim a trailing empty track; confirm this is deliberate.
    if self.columnCountForRow(-1) == 0:
        self.removeRow(len(self._segments) - 1)
    # in case of samplerate change, just always update
    self.updateCalibration()
def getresponse(self):
    """Get the response from the server.

    If the HTTPConnection is in the correct state, returns an
    instance of HTTPResponse or of whatever object is returned by
    class the response_class variable.

    If a request has not been sent or if a previous response has
    not be handled, ResponseNotReady is raised. If the HTTP
    response indicates that the connection should be closed, then
    it will be closed before the response is returned. When the
    connection is closed, the underlying socket is closed.
    """
    # if a prior response has been completed, then forget about it.
    if self.__response and self.__response.isclosed():
        self.__response = None
    # if a prior response exists, then it must be completed (otherwise, we
    # cannot read this response's header to determine the connection-close
    # behavior)
    # note: if a prior response existed, but was connection-close, then the
    # socket and response were made independent of this HTTPConnection
    # object since a new request requires that we open a whole new
    # connection
    # this means the prior response had one of two states:
    #   1) will_close: this connection was reset and the prior socket and
    #      response operate independently
    #   2) persistent: the response was retained and we await its
    #      isclosed() status to become true.
    if self.__state != _CS_REQ_SENT or self.__response:
        raise ResponseNotReady(self.__state)
    # Only pass the debug level through when it is actually enabled.
    if self.debuglevel > 0:
        response = self.response_class(self.sock, self.debuglevel, method=self._method)
    else:
        response = self.response_class(self.sock, method=self._method)
    # Read the status line and headers; this also resolves will_close.
    response.begin()
    assert response.will_close != _UNKNOWN
    # Request/response cycle complete: ready for the next request.
    self.__state = _CS_IDLE
    if response.will_close:
        # this effectively passes the connection to the response
        self.close()
    else:
        # remember this, so we can tell when it is complete
        self.__response = response
    return response
def stream_timeout(stream, timeout, timeout_msg=None):
    """Iterate over items in a streaming response from the Docker client
    within a timeout.

    :param ~docker.types.daemon.CancellableStream stream:
        Stream from the Docker client to consume items from.
    :param timeout:
        Timeout value in seconds.
    :param timeout_msg:
        Message to raise in the exception when a timeout occurs.
    """
    timed_out = threading.Event()

    def _expire():
        # Closing the stream ends the consuming loop; the event lets us
        # distinguish a timeout from normal exhaustion.
        timed_out.set()
        stream.close()

    timer = threading.Timer(timeout, _expire)
    try:
        timer.start()
        for item in stream:
            yield item
        # A timeout looks identical to the stream simply ending, so check
        # the flag to find out which one actually happened.
        if timed_out.is_set():
            raise TimeoutError(timeout_msg)
    finally:
        timer.cancel()
        # Close the stream's underlying response object (if it has one) to
        # avoid potential socket leaks. This seems more reliable at
        # preventing ResourceWarnings than stream.close() alone.
        # FIXME: potential race if the timer thread closes the stream at the
        # same time we do here, but hopefully without serious side effects.
        if hasattr(stream, '_response'):
            stream._response.close()
def setcookie(self, key, value, max_age=None, expires=None, path='/', domain=None, secure=None, httponly=False):
    """Add a new cookie, replacing any previously-sent cookie with the same key."""
    cookie = Morsel()
    cookie.key = key
    cookie.value = value
    cookie.coded_value = value
    # Attributes that are applied only when explicitly provided.
    for attr, attr_value in (('max-age', max_age), ('expires', expires),
                             ('path', path), ('domain', domain)):
        if attr_value is not None:
            cookie[attr] = attr_value
    # Boolean attributes are applied only when truthy.
    if secure:
        cookie['secure'] = secure
    if httponly:
        cookie['httponly'] = httponly
    # Drop any previously queued cookie under the same key, then queue ours.
    self.sent_cookies = [c for c in self.sent_cookies if c.key != key]
    self.sent_cookies.append(cookie)
def other_object_webhook_handler(event):
    """Handle updates to transfer, charge, invoice, invoiceitem, plan, product and source objects.

    Docs for:
    - charge: https://stripe.com/docs/api#charges
    - coupon: https://stripe.com/docs/api#coupons
    - invoice: https://stripe.com/docs/api#invoices
    - invoiceitem: https://stripe.com/docs/api#invoiceitems
    - plan: https://stripe.com/docs/api#plans
    - product: https://stripe.com/docs/api#products
    - source: https://stripe.com/docs/api#sources
    """
    if event.parts[:2] == ["charge", "dispute"]:
        # charge.dispute.* events carry a Dispute object, not a Charge, so
        # they are routed to the Dispute model rather than the mapping below.
        # (The previous comment claiming these events are "not handled" was
        # stale -- the code clearly targets models.Dispute.)
        target_cls = models.Dispute
    else:
        # Map the event category to its model; unknown categories yield None.
        target_cls = {"charge": models.Charge, "coupon": models.Coupon, "invoice": models.Invoice, "invoiceitem": models.InvoiceItem, "plan": models.Plan, "product": models.Product, "transfer": models.Transfer, "source": models.Source, }.get(event.category)
    _handle_crud_like_event(target_cls=target_cls, event=event)
def reduced_to_matrix(shape, degree, vals_by_weight):
    r"""Converts a reduced values dictionary into a matrix.

    .. note::
       This is a helper used only by :func:`_specialize_surface`.

    The ``vals_by_weight`` mapping has keys of the form
    ``(0, ..., 1, ..., 2, ...)`` where the ``0`` corresponds to the
    number of times the first set of barycentric weights was used in the
    reduction process, and similarly for ``1`` and ``2``.

    These keys are themselves barycentric weights; for example
    ``(0, 0, 0, 1, 2, 2)`` corresponds to the barycentric weight
    :math:`\left(\frac{3}{6}, \frac{1}{6}, \frac{2}{6}\right)`.

    The keys are visited in our standard ordering (bottom to top,
    left to right) and the associated columns packed into one matrix.

    Args:
        shape (tuple): The shape of the result matrix.
        degree (int): The degree of the surface.
        vals_by_weight (Mapping[tuple, numpy.ndarray]): Dictionary
            of reduced nodes according to blending of each of the
            three sets of weights in a reduction.

    Returns:
        numpy.ndarray: The newly created reduced control points.
    """
    result = np.empty(shape, order="F")
    col = 0
    for num_c in range(degree + 1):
        for num_b in range(degree + 1 - num_c):
            num_a = degree - num_b - num_c
            # Repeat each weight index as many times as it was used.
            weight_key = (0,) * num_a + (1,) * num_b + (2,) * num_c
            result[:, col] = vals_by_weight[weight_key][:, 0]
            col += 1
    return result
def get_usrgos_g_hdrgos(self, hdrgos):
    """Return usrgos under provided hdrgos."""
    # Accept a single header GO id as well as an iterable of them.
    if isinstance(hdrgos, str):
        hdrgos = [hdrgos]
    collected = set()
    for header_go in hdrgos:
        user_gos = self.hdrgo2usrgos.get(header_go)
        if user_gos is not None:
            collected.update(user_gos)
        # A header GO that doubles as a user GO is included in the result.
        if header_go in self.hdrgo_is_usrgo:
            collected.add(header_go)
    return collected
def status_unpin(self, id):
    """Unpin a pinned status for the logged-in user.

    Returns a `toot dict`_ with the status that used to be pinned.
    """
    # Accept either a raw id or a status dict; normalize to the id string.
    status_id = str(self.__unpack_id(id))
    return self.__api_request('POST', '/api/v1/statuses/{0}/unpin'.format(status_id))
def init(self):
    """Init the connection to the CouchDB server."""
    if not self.export_enable:
        return None
    # Embed credentials in the URI only when a user is configured.
    if self.user is not None:
        server_uri = 'http://{}:{}@{}:{}/'.format(self.user, self.password, self.host, self.port)
    else:
        server_uri = 'http://{}:{}/'.format(self.host, self.port)
    try:
        server = couchdb.Server(server_uri)
    except Exception as err:
        logger.critical("Cannot connect to CouchDB server %s (%s)" % (server_uri, err))
        sys.exit(2)
    else:
        logger.info("Connected to the CouchDB server %s" % server_uri)
    try:
        # Accessing the database raises if it does not exist yet.
        server[self.db]
    except Exception:
        # Database did not exist: create it.
        server.create(self.db)
    else:
        logger.info("There is already a %s database" % self.db)
    return server
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.