signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def execute(self, debug=False):
    """Execute the engine - currently simple executes all workflows.

    :param debug: when True, run over a fixed historical interval without
        watchdog alarms; when False, run over the configured online interval
        with a signal.alarm watchdog per iteration.
    """
    if debug:
        # Set some default times for execution (debugging)
        start_time = datetime(year=2016, month=10, day=19, hour=12, minute=28, tzinfo=UTC)
        duration = timedelta(seconds=5)
        end_time = start_time + duration
        relative_interval = RelativeTimeInterval(0, 0)
        time_interval = TimeInterval(start_time, end_time)
        # workflow_id = "lda_localisation_model_predict"
    else:
        duration = 0
        # not needed
        relative_interval = self.hyperstream.config.online_engine.interval
        time_interval = relative_interval.absolute(utcnow())
    for _ in range(self.hyperstream.config.online_engine.iterations):
        if not debug:
            # if this takes more than x minutes, kill myself
            signal.alarm(self.hyperstream.config.online_engine.alarm)
        logging.info("Online engine starting up.")
        # self.hyperstream.workflow_manager.set_requested_intervals(workflow_id, TimeIntervals([time_interval]))
        self.hyperstream.workflow_manager.set_all_requested_intervals(TimeIntervals([time_interval]))
        self.hyperstream.workflow_manager.execute_all()
        logging.info("Online engine shutting down.")
        logging.info("")
        sleep(self.hyperstream.config.online_engine.sleep)
        # Advance the interval for the next iteration: fixed step in debug,
        # otherwise slide to [previous end, now + configured relative end].
        if debug:
            time_interval += duration
        else:
            time_interval = TimeInterval(time_interval.end, utcnow() + timedelta(seconds=relative_interval.end))
|
def run(self, data):
    """Run Poisson state estimation on the data.

    Returns:
        list of W, M*W
        ll
    """
    if self.normalize_data:
        data = cell_normalize(data)
    M, W, ll = poisson_estimate_state(data, **self.params)
    # Each candidate output is guarded by its flag; the factories keep the
    # optional products (M.W, MDS embedding) from being computed unless asked.
    candidates = (
        (self.return_w, lambda: W),
        (self.return_m, lambda: M),
        (self.return_mw, lambda: M.dot(W)),
        (self.return_mds, lambda: dim_reduce(M, W, 2).T.dot(W)),
    )
    selected = [make() for wanted, make in candidates if wanted]
    return selected, ll
|
def decode_int(v):
    """Decodes an integer from serialization.

    Rejects non-canonical encodings that carry a leading zero byte.
    """
    has_leading_zero = len(v) > 0 and (v[0] == b'\x00' or v[0] == 0)
    if has_leading_zero:
        raise Exception("No leading zero bytes allowed for integers")
    return big_endian_to_int(v)
|
def generate_new_cid(upstream_cid=None):
    """Generate a new correlation id, possibly based on the given one."""
    if upstream_cid is None:
        # Fresh request: mint an id only when generation is enabled.
        if getattr(settings, 'CID_GENERATE', False):
            return str(uuid.uuid4())
        return None
    # An upstream id exists; append our own when both flags allow it.
    should_concatenate = (getattr(settings, 'CID_CONCATENATE_IDS', False)
                          and getattr(settings, 'CID_GENERATE', False))
    if should_concatenate:
        return '%s, %s' % (upstream_cid, str(uuid.uuid4()))
    return upstream_cid
|
def client_update(self, client, reason=None, pin=None, current_pin=None, verification_speed=None, row_doubling=None, password=None, bypass_expiration=None, bypass_limit=None, bypass_spacing_minutes=None, bypass_code=None, is_disabled=None, verification_lock=None, password_lock=None, enroll_deadline_extension_minutes=None, enroll_deadline_enable=None, windows_profile=None, role_rationale=None, role=None):
    """Update client info.

    Uses PUT to /clients/<client> interface.

    :Args:
        * *client*: (str) Client's ID

    :Kwargs:
        * *reason*: (str) The reason for changing the client's settings
        * *pin*: (str) The new PIN to set
        * *current_pin*: (str) The current PIN of the user. Only required if role is not admin and the Account Reset Mode (System Configuration) requires PIN.
        * *verification_speed*: (int) The speed at which the verification should appear for the client. Allowed values: 0, 25, 50, 75, 100.
        * *row_doubling*: (str) Row doubling is an AudioPIN only option that puts two rows of words in each pinpad digit. Allowed values: "OFF", "TRAIN", "ON"
        * *password*: (str) New client password
        * *bypass_expiration*: (int) Used to enable/disable a client's bypass. The time, in minutes, from when the request was received until the bypass expires. 0 removes the bypass, while -1 sets a bypass that doesn't expire.
        * *bypass_limit*: (int) The number of times a user may bypass. Set to 0 for no limit. Default value: 0. Size range: >= 0
        * *bypass_spacing_minutes*: (int) The time, in minutes, the user must wait between using each bypass. Set to 0 for no bypass rate limiting.
        * *bypass_code*: (str) The code that the client must enter to bypass.
        * *is_disabled*: (bool) If true, the client cannot do verifications (will automatically bypass).
        * *verification_lock*: (bool) Unlocks the given client if the client verified incorrectly too many times.
        * *password_lock*: (bool) Set to false to unlock a client who entered their password incorrectly too many times.
        * *enroll_deadline_extension_minutes*: (int) Amount of time, in minutes, to extend an enrollment deadline by.
        * *enroll_deadline_enable*: (bool) When true, enables the enrollment deadline for a certain client, when false disables an enrollment deadline.
        * *windows_profile*: (str) Assigns a Windows Profile to the user using the Windows Profile ID. To remove a profile, send null.
        * *role_rationale*: (str) Update the client rationale for a role
        * *role*: (str) Update the client role. Note: Google users cannot have their role updated. Allowed values: "admin", "manager", "support", "user".

    :More information: Can be found `here <https://cloud.knuverse.com/docs/api/#api-Clients-Update_client_information>`_.
    """
    client = self._client_id(client)
    body = {}

    def put_field(name, value, needs_auth=False):
        # Only kwargs the caller actually set (non-None) go into the PUT body.
        if value is None:
            return
        if needs_auth:
            # password/role changes must be re-authenticated with our password
            body["auth_password"] = self._password
        body[name] = value

    put_field("reason", reason)
    put_field("pin", pin)
    put_field("current_pin", current_pin)
    put_field("verification_speed", verification_speed)
    put_field("row_doubling", row_doubling)
    put_field("password", password, needs_auth=True)
    put_field("bypass_expiration", bypass_expiration)
    put_field("bypass_limit", bypass_limit)
    put_field("bypass_spacing_minutes", bypass_spacing_minutes)
    put_field("bypass_code", bypass_code)
    put_field("is_disabled", is_disabled)
    put_field("verification_lock", verification_lock)
    put_field("password_lock", password_lock)
    put_field("enroll_deadline_extension_minutes", enroll_deadline_extension_minutes)
    put_field("enroll_deadline_enable", enroll_deadline_enable)
    put_field("windows_profile", windows_profile)
    put_field("role", role, needs_auth=True)
    put_field("role_rationale", role_rationale)
    response = self._put(url.clients_id.format(id=client), body=body)
    self._check_response(response, 200)
|
def send(self, obj_id):
    """Send email to the assigned lists.

    :param obj_id: int
    :return: dict|str
    """
    target = '{url}/{id}/send'.format(url=self.endpoint_url, id=obj_id)
    return self.process_response(self._client.session.post(target))
|
def group(self, items, keep_empty=False):
    """Given an iterable of instances, groups them by state using :class:`ManagedState`
    instances as dictionary keys. Returns an `OrderedDict` that preserves the order
    of states from the source :class:`~coaster.utils.classes.LabeledEnum`.

    :param bool keep_empty: If ``True``, empty states are included in the result
    :raises TypeError: if any item is not an instance of the managed class
    """
    # Class of the item being managed
    cls = self.cls if self.cls is not None else type(self.obj)
    groups = OrderedDict()
    # Ensure we sort groups using the order of states in the source LabeledEnum.
    # We'll discard the unused states later.
    for mstate in self.statemanager.states_by_value.values():
        groups[mstate] = []
    # Now process the items by state
    for item in items:
        # Use isinstance instead of `type(item) != cls` to account for subclasses
        if not isinstance(item, cls):
            # BUG FIX: report the class actually checked (`cls`), not `self.cls`,
            # which is None whenever the class was derived from type(self.obj).
            raise TypeError("Item %s is not an instance of type %s" % (repr(item), repr(cls)))
        statevalue = self.statemanager._value(item)
        mstate = self.statemanager.states_by_value[statevalue]
        groups[mstate].append(item)
    if not keep_empty:
        for key, value in list(groups.items()):
            if not value:
                del groups[key]
    return groups
|
def marginalize(self, variables, inplace=True):
    """Marginalize the distribution with respect to the given variables.

    Parameters
    ----------
    variables: list, tuple or array-like
        List of variables to be removed from the marginalized distribution.
    inplace: boolean
        If inplace=True it will modify the factor itself, else would return
        a new CustomDistribution instance.

    Returns
    -------
    Marginalized distribution or None:
        if inplace=True (default) returns None
        if inplace=False returns a new CustomDistribution instance.
    """
    # Validate the type before calling len() so a non-sized argument gets
    # the intended TypeError, not an obscure one.
    if not isinstance(variables, (list, tuple, np.ndarray)):
        raise TypeError("variables: Expected type iterable, "
                        "got: {var_type}".format(var_type=type(variables)))
    if len(variables) == 0:
        raise ValueError("Shouldn't be calling marginalize over no variable.")
    # BUG FIX: normalize to a plain list so `variables + var_to_keep` below is
    # list concatenation even for tuple input (TypeError before) or ndarray
    # input (elementwise broadcast before).
    variables = list(variables)
    for var in variables:
        if var not in self.variables:
            raise ValueError("{var} not in scope.".format(var=var))
    phi = self if inplace else self.copy()
    all_var = [var for var in self.variables]
    var_to_keep = [var for var in self.variables if var not in variables]
    reordered_var_index = [all_var.index(var) for var in variables + var_to_keep]
    pdf = phi._pdf

    # The arguments need to be reordered because integrate.nquad
    # integrates over the first n arguments of the function passed.
    def reordered_pdf(*args):
        # ordered_args restores the original order as it was in self.variables
        ordered_args = [args[reordered_var_index.index(index_id)] for index_id in range(len(all_var))]
        return pdf(*ordered_args)

    def marginalized_pdf(*args):
        return integrate.nquad(reordered_pdf, [[-np.inf, np.inf] for i in range(len(variables))], args=args)[0]

    phi._pdf = marginalized_pdf
    phi.variables = var_to_keep
    if not inplace:
        return phi
|
def add_moc_from_dict(self, moc_dict, moc_options=None):
    """load a MOC from a dict object and display it in Aladin Lite widget

    Arguments:
    moc_dict: the dict containing the MOC cells. Keys are the HEALPix orders,
        values are the pixel indexes, eg: {"1": [1,2,4], "2": [12,13,14,21,23,25]}
    moc_options: dictionary object (defaults to an empty dict)
    """
    # BUG FIX: the previous mutable default argument ({}) was shared across
    # all calls; use a None sentinel and build a fresh dict per call.
    if moc_options is None:
        moc_options = {}
    self.moc_dict = moc_dict
    self.moc_options = moc_options
    # Toggling the flag signals the front-end widget that a new MOC is set
    # (presumably observed by the widget machinery — TODO confirm).
    self.moc_from_dict_flag = not self.moc_from_dict_flag
|
def _compute_mean ( self , C , mag , ztor , rrup ) :
"""Compute mean value as in ` ` subroutine getGeom ` ` in ` ` hazgridXnga2 . f ` `"""
|
gc0 = 0.2418
ci = 0.3846
gch = 0.00607
g4 = 1.7818
ge = 0.554
gm = 1.414
mean = ( gc0 + ci + ztor * gch + C [ 'gc1' ] + gm * mag + C [ 'gc2' ] * ( 10 - mag ) ** 3 + C [ 'gc3' ] * np . log ( rrup + g4 * np . exp ( ge * mag ) ) )
return mean
|
def _clear_community_details ( community_details ) :
'''Clears community details .'''
|
for key in [ 'acl' , 'mode' ] :
_str_elem ( community_details , key )
_mode = community_details . get [ 'mode' ] = community_details . get ( 'mode' ) . lower ( )
if _mode in _COMMUNITY_MODE_MAP . keys ( ) :
community_details [ 'mode' ] = _COMMUNITY_MODE_MAP . get ( _mode )
if community_details [ 'mode' ] not in [ 'ro' , 'rw' ] :
community_details [ 'mode' ] = 'ro'
# default is read - only
return community_details
|
def pyle(argv=None):
    """Execute pyle with the specified arguments, or sys.argv if no arguments specified."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("-m", "--modules", dest="modules", action='append', help="import MODULE before evaluation. May be specified more than once.")
    parser.add_argument("-i", "--inplace", dest="inplace", action='store_true', default=False, help="edit files in place. When used with file name arguments, the files will be replaced by the output of the evaluation")
    parser.add_argument("-e", "--expression", action="append", dest="expressions", help="an expression to evaluate for each line")
    parser.add_argument('files', nargs='*', help="files to read as input. If used with --inplace, the files will be replaced with the output")
    parser.add_argument("--traceback", action="store_true", default=False, help="print a traceback on stderr when an expression fails for a line")
    # BUG FIX: `parse_args() if not argv else parse_args(argv)` sent an
    # explicitly-passed empty argv list back to sys.argv. argparse already
    # treats None as "use sys.argv", so pass argv through unconditionally.
    args = parser.parse_args(argv)
    pyle_evaluate(args.expressions, args.modules, args.inplace, args.files, args.traceback)
|
def suspendMember(self, clusterId, memberId):
    """Issue a suspendMember RPC and return the peer's response.

    Parameters:
    - clusterId
    - memberId
    """
    # Thrift-style call: ship the request, then block on the reply.
    self.send_suspendMember(clusterId, memberId)
    return self.recv_suspendMember()
|
def eigenvectors ( T , k = None , right = True , ncv = None , reversible = False , mu = None ) :
r"""Compute eigenvectors of given transition matrix .
Parameters
T : scipy . sparse matrix
Transition matrix ( stochastic matrix ) .
k : int ( optional ) or array - like
For integer k compute the first k eigenvalues of T
else return those eigenvector sepcified by integer indices in k .
right : bool , optional
If True compute right eigenvectors , left eigenvectors otherwise
ncv : int ( optional )
The number of Lanczos vectors generated , ` ncv ` must be greater than k ;
it is recommended that ncv > 2 * k
reversible : bool , optional
Indicate that transition matrix is reversible
mu : ( M , ) ndarray , optional
Stationary distribution of T
Returns
eigvec : numpy . ndarray , shape = ( d , n )
The eigenvectors of T ordered with decreasing absolute value of
the corresponding eigenvalue . If k is None then n = d , if k is
int then n = k otherwise n is the length of the given indices array .
Notes
Eigenvectors are computed using the scipy interface
to the corresponding ARPACK routines ."""
|
if k is None :
raise ValueError ( "Number of eigenvectors required for decomposition of sparse matrix" )
else :
if reversible :
eigvec = eigenvectors_rev ( T , k , right = right , ncv = ncv , mu = mu )
return eigvec
else :
eigvec = eigenvectors_nrev ( T , k , right = right , ncv = ncv )
return eigvec
|
def ignore(self, task, *args, **kw):
    """Thread it and forget it.

    For information on the arguments to this method, see work().
    """
    # Fire-and-forget: route through callback() with the null callback and
    # error reporting switched off, so failures are silenced.
    self.callback(task, null_cb, False, *args, **kw)
|
def smart_open_write(path=None, mode='wb', encoding=None):
    """Open a file for writing or return ``stdout``.

    Adapted from StackOverflow user "Wolph"
    (http://stackoverflow.com/a/17603000).
    """
    # NOTE(review): written as a generator — presumably wrapped by
    # contextlib.contextmanager at the decoration site; confirm upstream.
    if path is None:
        # No path given: hand back a handle onto stdout's descriptor.
        fh = io.open(sys.stdout.fileno(), mode=mode, encoding=encoding)
    else:
        fh = io.open(path, mode=mode, encoding=encoding)
    try:
        yield fh
    finally:
        # make sure we don't close stdout
        if fh.fileno() != sys.stdout.fileno():
            fh.close()
|
async def _process_lines(self, pattern: Optional[str] = None) -> None:
    """Read lines from the pipe and queue those that match *pattern*.

    Runs until EOF, a read/decode failure, or ``self.is_running`` goes
    False; always terminates the queue with a ``None`` sentinel so
    consumers know the stream ended.

    :param pattern: optional regex; when None every line is queued.
    """
    if pattern is not None:
        cmp = re.compile(pattern)
        _LOGGER.debug("Start working with pattern '%s'.", pattern)
    # read lines
    while self.is_running:
        try:
            line = await self._input.readline()
            if not line:
                # EOF — producer closed its end of the pipe.
                break
            line = line.decode()
        except Exception:  # pylint: disable=broad-except
            # Best-effort reader: any read/decode failure just ends the loop.
            break
        # No pattern means every line matches; otherwise keep regex hits only.
        match = True if pattern is None else cmp.search(line)
        if match:
            _LOGGER.debug("Process: %s", line)
            await self._que.put(line)
    try:
        # Reap the subprocess off the event loop (blocking wait in executor).
        await self._loop.run_in_executor(None, self._proc.wait)
    finally:
        # Sentinel: tell consumers no more lines are coming.
        await self._que.put(None)
        _LOGGER.debug("Close read ffmpeg output.")
|
def source_amplitude(self, kwargs_ps, kwargs_lens):
    """returns the source amplitudes

    :param kwargs_ps: list of point-source keyword dicts, one per model
    :param kwargs_lens: lens-model keyword arguments shared by all models
    :return: list with one amplitude entry per point-source model
    """
    return [model.source_amplitude(kwargs_ps=kwargs_ps[i], kwargs_lens=kwargs_lens)
            for i, model in enumerate(self._point_source_list)]
|
def install_bootstrapped_files(nb_path=None, server_config=True, DEBUG=False):
    """Installs javascript and exporting server extensions in Jupyter notebook.

    Args:
        nb_path (string): Path to notebook module.
        server_config (boolean): Install exporting server extensions.
        DEBUG (boolean): Verbose mode.
    """
    install_path = None
    print('Starting hide_code.js install...')
    current_dir = path.abspath(path.dirname(__file__))
    config_dirs = j_path.jupyter_config_path()
    notebook_module_path = Utils.get_notebook_module_dir()
    # check for config directory with a "custom" folder
    # TODO update this logic to check if custom.js file exists
    for dir in config_dirs:
        custom_dir = path.join(dir, "custom")
        if path.isdir(custom_dir):
            install_path = custom_dir
            break
    # last ditch effort in case jupyter config directories don't contain custom/custom.js
    if install_path is None:  # FIX: identity check instead of `== None`
        print("No config directories contain \"custom\" folder. Trying Jupyter notebook module path...")
        install_path = path.join(notebook_module_path, "static", "custom")
    if nb_path is not None:  # FIX: identity check instead of `!= None`
        install_path = nb_path
        print("Using argument supplied path: " + install_path)
    if DEBUG:
        print(install_path)
    # copy js into static/custom directory in Jupyter/iPython directory
    if path.isdir(install_path):
        shutil.copyfile(path.join(current_dir, "hide_code.js"), path.join(install_path, "hide_code.js"))
        print('Copying hide_code.js to ' + install_path)
        # add require to end of custom.js to auto-load on notebook startup
        print("Attempting to configure custom.js to auto-load hide_code.js...")
        # FIX: narrowed the bare `except:` to file-system errors so unrelated
        # failures (e.g. KeyboardInterrupt) are no longer swallowed.
        try:
            with open(path.join(current_dir, "auto-load.txt")) as auto:
                auto_load_txt = auto.read()
                auto_loaded = False
                # check if auto-load.txt is already in custom.js
                with open(path.join(install_path, "custom.js"), 'r') as customJS:
                    if auto_load_txt in customJS.read():
                        auto_loaded = True
                        print("Custom.js already configured to auto-load hide_code.js.")
                if not auto_loaded:
                    # append auto load require to end of custom.js
                    with open(path.join(install_path, "custom.js"), 'a') as customJS:
                        customJS.write(auto_load_txt)
                        print("Configured custom.js to auto-load hide_code.js.")
        except (IOError, OSError):
            print("Custom.js not in custom directory.")
    else:
        print('Unable to install into ' + install_path)
        print('Directory doesn\'t exist.')
        print('Make sure Jupyter is installed.')
    if server_config:
        print("Attempting to configure auto-loading for hide_code export handlers.")
        try:
            # Activate the Python server extension
            server_cm = ConfigManager(config_dir=j_path.jupyter_config_dir())
            cfg = server_cm.get('jupyter_notebook_config')
            server_extensions = (cfg.setdefault('NotebookApp', {}).setdefault('server_extensions', []))
            extension = 'hide_code.hide_code'
            if extension not in server_extensions:
                cfg['NotebookApp']['server_extensions'] += [extension]
                server_cm.update('jupyter_notebook_config', cfg)
                print('Configured jupyter to auto-load hide_code export handlers.')
            else:
                print("Jupyter already configured to auto-load export handlers.")
        except Exception:  # FIX: bare except -> Exception (still best-effort)
            print('Unable to install server extension.')
|
def update(self):
    """Draw the scroll bar."""
    # Pick glyphs according to terminal capabilities.
    cursor_char = u"█" if self._canvas.unicode_aware else "O"
    track_char = u"░" if self._canvas.unicode_aware else "|"
    # Work out which cell of the track carries the cursor.
    try:
        position = self._get_pos()
        position = min(1, max(0, position))
        position = max(int(self._height * position) - 1, 0)
    except ZeroDivisionError:
        position = 0
    colour, attr, bg = self._palette["scroll"]
    base_y = self._canvas.start_line if self._absolute else 0
    for row in range(self._height):
        glyph = cursor_char if row == position else track_char
        self._canvas.print_at(glyph, self._x, base_y + self._y + row, colour, attr, bg)
|
def _get_batch_name(items, skip_jointcheck=False):
    """Retrieve the shared batch name for a group of items."""
    name_counts = collections.defaultdict(int)
    joint_present = any(is_joint(d) for d in items)
    for data in items:
        if joint_present and not skip_jointcheck:
            # Joint calling groups fall back to the per-sample name.
            names = dd.get_sample_name(data)
        else:
            names = dd.get_batches(data) or dd.get_sample_name(data)
        if not isinstance(names, (list, tuple)):
            names = [names]
        for name in names:
            name_counts[name] += 1
    # Most frequently seen batch name wins (first seen breaks ties).
    return max(name_counts.items(), key=lambda kv: kv[1])[0]
|
def _get_read_names ( self , search_result , max_range ) :
'''_ get _ read _ names - loops through hmm hits and their alignment spans to
determine if they are potentially linked ( for example , if one gene in a
contig hits a hmm more than once , in two different conserved regions
of that gene ) and combines them into one ' hit ' because they are
technically the same . The total span of the hits deemed to be linked is
returned .
Parameters
search _ result : obj
SequenceSearchResult object with all paramaters defined . Used here
to create rows containing information on alignment direction and
alignment span .
max _ range : int
Maximum range that a gene can extend within a contig . Any hits
that extend beyond this length cannot be linked . max _ range is
set as 1.5 X the average length of all full length genes used
in the search database . This is defined in the CONTENTS . json file
within a gpkg .
Returns
Dictionary where keys are the contig / read name . The value for each
entry is an array lists , one per hit in each contig , each with the
span ( min and max ) of the alignment .'''
|
splits = { }
# Define an output dictionary to be filled
spans = [ ]
for result in search_result : # Create a table ( list of rows contain span , and complement information
spans += list ( result . each ( [ SequenceSearchResult . QUERY_ID_FIELD , SequenceSearchResult . ALIGNMENT_DIRECTION , SequenceSearchResult . HIT_FROM_FIELD , SequenceSearchResult . HIT_TO_FIELD , SequenceSearchResult . QUERY_FROM_FIELD , SequenceSearchResult . QUERY_TO_FIELD ] ) )
for hit in spans : # For each of these rows ( i . e . hits )
i = hit [ 0 ]
# set id to i
c = hit [ 1 ]
# set complement to c
ft = [ min ( hit [ 2 : 4 ] ) , max ( hit [ 2 : 4 ] ) ]
# set span as ft ( i . e . from - to ) - This is the amount covering the query
qs = [ min ( hit [ 4 : 6 ] ) , max ( hit [ 4 : 6 ] ) ]
# seq the query span to qs - This is the amount covering the HMM
if ft [ 0 ] == ft [ 1 ] :
continue
# if the span covers none of the query , skip that entry ( seen this before )
if i not in splits : # If the hit hasnt been seen yet
splits [ i ] = { 'span' : [ ft ] , 'strand' : [ c ] , 'query_span' : [ qs ] }
# add the span and complement as new entry
else : # otherwise ( if it has been seen )
for idx , entry in enumerate ( splits [ i ] [ 'span' ] ) : # for each previously added entry
if splits [ i ] [ 'strand' ] [ idx ] != c : # If the hit is on the same complement strand
splits [ i ] [ 'span' ] . append ( ft )
# Add the new range to be split out in the future
splits [ i ] [ 'strand' ] . append ( c )
# Add the complement strand as well
splits [ i ] [ 'query_span' ] . append ( qs )
break
previous_qs = splits [ i ] [ 'query_span' ] [ idx ]
# Get the query span of the previous hit
previous_q_range = set ( range ( previous_qs [ 0 ] , previous_qs [ 1 ] ) )
# Get the range of each
current_q_range = set ( range ( qs [ 0 ] , qs [ 1 ] ) )
query_overlap = set ( previous_q_range ) . intersection ( current_q_range )
# Find the intersection between the two ranges
previous_ft_span = set ( range ( entry [ 0 ] , entry [ 1 ] ) )
current_ft_span = set ( range ( ft [ 0 ] , ft [ 1 ] ) )
if any ( query_overlap ) : # If there is an overlap
# if the span over the actual read that hit the HMM
# for each hit overlap by > 25 % , they are considered
# the same hit , and ignored
intersection_fraction = float ( len ( previous_ft_span . intersection ( current_ft_span ) ) )
if intersection_fraction / float ( len ( previous_ft_span ) ) >= PREVIOUS_SPAN_CUTOFF :
break
elif intersection_fraction / float ( len ( current_ft_span ) ) >= PREVIOUS_SPAN_CUTOFF :
break
else : # else ( i . e . if the hit covers less that 25 % of the sequence of the previous hit )
# If they made it this far , it means that the hits do not overlap .
# But one last check must be made to ensure they do not cover the same
# region in the HMM .
if len ( query_overlap ) > ( len ( current_q_range ) * PREVIOUS_SPAN_CUTOFF ) : # if the overlap on the query HMM does not span over 25%
if ( idx + 1 ) == len ( splits [ i ] [ 'span' ] ) :
splits [ i ] [ 'span' ] . append ( ft )
# Add from - to as another entry , this is another hit .
splits [ i ] [ 'strand' ] . append ( c )
# Add strand info as well
splits [ i ] [ 'query_span' ] . append ( qs )
break
# And break
if min ( entry ) < min ( ft ) : # if / else to determine which entry comes first ( e . g . 1-5 , 6-10 not 6-10 , 1-5)
if max ( ft ) - min ( entry ) < max_range : # Check if they lie within range of eachother
entry [ 1 ] = max ( ft )
# ammend the entry if they are
break
# And break the loop
else :
if max ( entry ) - min ( ft ) < max_range : # Check if they lie within range of eachother
entry [ 0 ] = min ( ft )
# ammend the entry if they are
break
# And break the loop
else : # if no break occured ( no overlap )
splits [ i ] [ 'span' ] . append ( ft )
# Add the new range to be split out in the future
splits [ i ] [ 'strand' ] . append ( c )
# Add the complement strand as well
splits [ i ] [ 'query_span' ] . append ( qs )
return { key : { "entry" : entry [ 'span' ] , 'strand' : entry [ 'strand' ] } for key , entry in splits . iteritems ( ) }
|
async def profile(self):
    """|coro|

    Gets the user's profile.

    .. note::
        This only applies to non-bot accounts.

    Raises
    -------
    Forbidden
        Not allowed to fetch profiles.
    HTTPException
        Fetching the profile failed.

    Returns
    --------
    :class:`Profile`
        The profile of the user.
    """
    state = self._state
    data = await state.http.get_user_profile(self.id)

    def to_guild(payload):
        return state._get_guild(int(payload['id']))

    since = data.get('premium_since')
    # Resolve guild ids to Guild objects, dropping any we can't find.
    mutual_guilds = [g for g in map(to_guild, data.get('mutual_guilds', [])) if g]
    return Profile(
        flags=data['user'].get('flags', 0),
        premium_since=parse_time(since),
        mutual_guilds=mutual_guilds,
        user=self,
        connected_accounts=data['connected_accounts'],
    )
|
def check_2d ( inp ) :
"""Check input to be a matrix . Converts lists of lists to np . ndarray .
Also allows the input to be a scipy sparse matrix .
Parameters
inp : obj
Input matrix
Returns
numpy . ndarray , scipy . sparse or None
Input matrix or None
Examples
> > > check _ 2d ( [ [ 0 , 1 ] , [ 2 , 3 ] ] )
[ [ 0 , 1 ] , [ 2 , 3 ] ]
> > > check _ 2d ( ' test ' )
None"""
|
if isinstance ( inp , list ) :
return check_2d ( np . array ( inp ) )
if isinstance ( inp , ( np . ndarray , np . matrixlib . defmatrix . matrix ) ) :
if inp . ndim == 2 : # input is a dense matrix
return inp
if sps . issparse ( inp ) :
if inp . ndim == 2 : # input is a sparse matrix
return inp
|
def summarize ( self , test_arr , vectorizable_token , sentence_list , limit = 5 ) :
'''Summarize input document .
Args :
test _ arr : ` np . ndarray ` of observed data points . .
vectorizable _ token : is - a ` VectorizableToken ` .
sentence _ list : ` list ` of all sentences .
limit : The number of selected abstract sentence .
Returns :
` np . ndarray ` of scores .'''
|
if isinstance ( vectorizable_token , VectorizableToken ) is False :
raise TypeError ( )
_ = self . inference ( test_arr )
score_arr = self . __encoder_decoder_controller . get_reconstruction_error ( )
score_arr = score_arr . reshape ( ( score_arr . shape [ 0 ] , - 1 ) ) . mean ( axis = 1 )
score_list = score_arr . tolist ( )
abstract_list = [ ]
for i in range ( limit ) :
if self . __normal_prior_flag is True :
key = score_arr . argmin ( )
else :
key = score_arr . argmax ( )
score = score_list . pop ( key )
score_arr = np . array ( score_list )
seq_arr = test_arr [ key ]
token_arr = vectorizable_token . tokenize ( seq_arr . tolist ( ) )
s = " " . join ( token_arr . tolist ( ) )
_s = "" . join ( token_arr . tolist ( ) )
for sentence in sentence_list :
if s in sentence or _s in sentence :
abstract_list . append ( sentence )
abstract_list = list ( set ( abstract_list ) )
if len ( abstract_list ) >= limit :
break
return abstract_list
|
def exec(self, *command_tokens, **command_env):
    """:meth:`.WCommandProto.exec` implementation"""
    tokens = self.mutate_command_tokens(*command_tokens)
    if tokens is not None:
        matched = self.selector().select(*tokens, **command_env)
        if matched is not None:
            return matched.exec(*tokens, **command_env)
    # Either mutation or selection failed — no command handles these tokens.
    raise RuntimeError('Command mismatch: %s' % self.join_tokens(*command_tokens))
|
def get_list(self, search='', start=0, limit=0, order_by='', order_by_dir='ASC', published_only=False, minimal=False):
    """Get a list of items.

    :param search: str
    :param start: int
    :param limit: int
    :param order_by: str
    :param order_by_dir: str
    :param published_only: bool
    :param minimal: bool
    :return: dict|str
    """
    parameters = {}
    # FIX: replaced the fragile `locals()` introspection with an explicit
    # mapping; behaviour is unchanged (falsy values are still omitted).
    for name, value in (('search', search), ('start', start), ('limit', limit), ('minimal', minimal)):
        if value:
            parameters[name] = value
    if order_by:
        parameters['orderBy'] = order_by
    if order_by_dir:
        parameters['orderByDir'] = order_by_dir
    if published_only:
        parameters['publishedOnly'] = 'true'
    response = self._client.session.get(self.endpoint_url, params=parameters)
    return self.process_response(response)
|
def get_arp_output_arp_entry_age(self, **kwargs):
    """Auto Generated Code.

    Builds the XML payload for a get-arp RPC reply filter: fills in the
    ``ip-address`` key and the ``age`` leaf of one ``arp-entry``, then
    dispatches the tree through the supplied (or default) callback.

    Kwargs:
        ip_address (str): key identifying the ARP entry (required).
        age (str): age value for the entry (required).
        callback (callable): override for ``self._callback``.
    """
    # FIX: dropped the dead `config = ET.Element("config")` — it was
    # immediately rebound to the get_arp element and never used.
    get_arp = ET.Element("get_arp")
    config = get_arp
    output = ET.SubElement(get_arp, "output")
    arp_entry = ET.SubElement(output, "arp-entry")
    ip_address_key = ET.SubElement(arp_entry, "ip-address")
    ip_address_key.text = kwargs.pop('ip_address')
    age = ET.SubElement(arp_entry, "age")
    age.text = kwargs.pop('age')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def _build_primitive_cell ( self ) :
"""primitive _ matrix :
Relative axes of primitive cell to the input unit cell .
Relative axes to the supercell is calculated by :
supercell _ matrix ^ - 1 * primitive _ matrix
Therefore primitive cell lattice is finally calculated by :
( supercell _ lattice * ( supercell _ matrix ) ^ - 1 * primitive _ matrix ) ^ T"""
|
self . _primitive = self . _get_primitive_cell ( self . _supercell , self . _supercell_matrix , self . _primitive_matrix )
|
def __field_to_parameter_type(self, field):
    """Converts the field variant type into a string describing the parameter.

    Args:
      field: An instance of a subclass of messages.Field.

    Returns:
      A string corresponding to the variant enum of the field, with a few
      exceptions. In the case of signed ints, the 's' is dropped; for the
      BOOL variant, 'boolean' is used; and for the ENUM variant, 'string'
      is used.

    Raises:
      TypeError: if the field variant is a message variant.
    """
    variant = field.variant
    if variant == messages.Variant.MESSAGE:
        raise TypeError('A message variant can\'t be used in a parameter.')
    # Lowercase names ('string', not 'STRING'); a few variants map to
    # different names in the generated parameter description.
    overrides = {
        messages.Variant.SINT32: 'int32',
        messages.Variant.SINT64: 'int64',
        messages.Variant.BOOL: 'boolean',
        messages.Variant.ENUM: 'string',
    }
    return overrides.get(variant) or variant.name.lower()
|
def map(func, data, num_workers=None):  # type: (callable, Iterable, Optional[int]) -> Iterable
    """Map an iterable using multithreading.

    ``func`` is called as ``func(key, value)`` for each item; results are
    reassembled into the same container type as ``data``.

    >>> s = pd.Series(range(120, 0, -1))
    >>> s2 = map(lambda i, x: x ** 3.75, s)
    >>> isinstance(s2, type(s))
    True
    >>> len(s) == len(s2)
    True
    >>> (s2 == s.map(lambda x: x ** 3.75)).all()
    True
    >>> s = list(range(120, 0, -1))
    >>> s2 = map(lambda i, x: x ** 3.75, s)
    >>> isinstance(s2, type(s))
    True
    >>> len(s) == len(s2)
    True
    >>> all(x ** 3.75 == s2[i] for i, x in enumerate(s))
    True
    >>> s = dict(enumerate(range(120, 0, -1)))
    >>> s2 = map(lambda i, x: x ** 3.75, s)
    >>> isinstance(s2, type(s))
    True
    >>> len(s) == len(s2)
    True
    >>> all(x ** 3.75 == s2[i] for i, x in s.items())
    True
    """
    backend = ThreadPool(n_workers=num_workers)
    iterable = None
    # pd.Series didn't have .items() until pandas 0.21,
    # so iteritems for older versions
    for method in ('iterrows', 'iteritems', 'items'):
        if hasattr(data, method):
            iterable = getattr(data, method)()
            break
    if iterable is None:
        iterable = enumerate(data)
    mapped = {}

    def collect(key):
        # Bind the key so the async callback stores under the right slot.
        def process(res):
            mapped[key] = res
        return process

    for key, value in iterable:
        backend.submit(func, key, value, callback=collect(key))
    backend.shutdown()
    if isinstance(data, pd.DataFrame):
        return pd.DataFrame.from_dict(mapped, orient='index').reindex(data.index)
    elif isinstance(data, pd.Series):
        return pd.Series(mapped).reindex(data.index)
    elif isinstance(data, (list, tuple)):
        # BUG FIX: iterating a dict yields its *keys*, so for tuple input
        # ``type(data)(mapped)`` returned the indices 0..n-1 instead of
        # the mapped values. Rebuild by index for both list and tuple.
        return type(data)(mapped[i] for i in range(len(data)))
    else:
        # Other mapping-like containers (e.g. dict) rebuild from the
        # key -> result pairs.
        return type(data)(mapped)
|
def term_to_binary(term, compressed=False):
    """Encode Python types into Erlang terms in binary data"""
    payload = _term_to_binary(term)
    if compressed is False:
        # Plain (uncompressed) external term format.
        return b_chr(_TAG_VERSION) + payload
    # True selects zlib level 6; otherwise an explicit 0..9 level.
    level = 6 if compressed is True else compressed
    if level < 0 or level > 9:
        raise InputException('compressed in [0..9]')
    size = len(payload)
    # The compressed-term header stores the uncompressed size as uint32.
    if size > 4294967295:
        raise OutputException('uint32 overflow')
    return (b_chr(_TAG_VERSION) + b_chr(_TAG_COMPRESSED_ZLIB) +
            struct.pack(b'>I', size) + zlib.compress(payload, level))
|
def executable_path(conn, executable):
    """Remote validator that accepts a connection object to ensure that a
    certain executable is available, returning its full path if so.

    Otherwise an exception with thorough details will be raised, informing
    the user that the executable was not found.
    """
    full_path = conn.remote_module.which(executable)
    if full_path:
        return full_path
    # No hit on the remote PATH: report executable and host in the error.
    raise ExecutableNotFound(executable, conn.hostname)
|
def get_param_arg(param, idx, klass, arg, attr='id'):
    """Return the correct value for a fabric from `arg`.

    An instance of `klass` is reduced to its `attr` attribute; a raw
    int/str identifier is passed through; anything else is a TypeError.
    """
    if isinstance(arg, klass):
        return getattr(arg, attr)
    if isinstance(arg, (int, str)):
        return arg
    raise TypeError("%s[%d] must be int, str, or %s, not %s"
                    % (param, idx, klass.__name__, type(arg).__name__))
|
def convert_date(date):
    """Convert string to datetime object."""
    # Expand month names first, then normalize the string before parsing.
    normalized = convert_month(date, shorten=False)
    return datetime.strptime(convert_string(normalized),
                             DATE_FMT.replace('-', ''))
|
def del_export(exports='/etc/exports', path=None):
    '''Remove an export

    CLI Example:

    .. code-block:: bash

        salt '*' nfs.del_export /media/storage
    '''
    entries = list_exports(exports)
    # KeyError if the path is not currently exported (same as the
    # previous `del` statement).
    entries.pop(path)
    _write_exports(exports, entries)
    return entries
|
async def send_pages(self):
    """A helper utility to send the page output from :attr:`paginator`
    to the destination, one message per page."""
    target = self.get_destination()
    for rendered_page in self.paginator.pages:
        await target.send(rendered_page)
|
def create_tags(user):
    """Create a tag."""
    # Server-generated fields first, then the validated request payload.
    values = {
        'id': utils.gen_uuid(),
        'created_at': datetime.datetime.utcnow().isoformat(),
    }
    values.update(schemas.tag.post(flask.request.json))
    with flask.g.db_conn.begin():
        # Tag names are unique: reject duplicates up front.
        dup_query = sql.select([_TABLE.c.id]).where(
            sql.and_(_TABLE.c.name == values['name']))
        if flask.g.db_conn.execute(dup_query).fetchone():
            raise dci_exc.DCIConflict('Tag already exists', values)
        # create the label/value row
        flask.g.db_conn.execute(_TABLE.insert().values(**values))
    return flask.Response(json.dumps({'tag': values}), 201,
                          content_type='application/json')
|
def from_dict(cls, d):
    """Convert a dictionary into an xarray.DataArray

    Input dict can take several forms::

        d = {'dims': ('t'), 'data': x}

        d = {'coords': {'t': {'dims': 't', 'data': t,
                              'attrs': {'units': 's'}}},
             'attrs': {'title': 'air temperature'},
             'dims': 't',
             'data': x,
             'name': 'a'}

    where 't' is the name of the dimension, 'a' is the name of the array,
    and x and t are lists, numpy.arrays, or pandas objects.

    Parameters
    ----------
    d : dict, with a minimum structure of {'dims': [..], 'data': [..]}

    Returns
    -------
    obj : xarray.DataArray

    See also
    --------
    DataArray.to_dict
    Dataset.from_dict
    """
    coords = None
    if 'coords' in d:
        try:
            coords = OrderedDict(
                [(k, (v['dims'], v['data'], v.get('attrs')))
                 for k, v in d['coords'].items()])
        except KeyError as e:
            raise ValueError(
                "cannot convert dict when coords are missing the key "
                "'{dims_data}'".format(dims_data=str(e.args[0])))
    try:
        data = d['data']
    except KeyError:
        # BUG FIX: the message previously ended with a doubled quote
        # ("...the key 'data''"); normalized to a single closing quote.
        raise ValueError("cannot convert dict without the key 'data'")
    else:
        obj = cls(data, coords, d.get('dims'), d.get('name'), d.get('attrs'))
    return obj
|
def select(files, start, stop):
    """Helper function for handling start and stop indices.

    Returns files[start:stop], treating a missing (None) bound as the
    corresponding end of the list. If both bounds are falsy (None or 0)
    the list is returned unchanged.
    """
    if not (start or stop):
        return files
    lo = 0 if start is None else start
    hi = len(files) if stop is None else stop
    return files[lo:hi]
|
def current_fact_index(self):
    """Current fact index in the self.facts list.

    Raises ValueError if the current fact's id is not among the facts
    (behaviour of list.index).
    """
    ordered_ids = [fact.id for fact in self.facts]
    return ordered_ids.index(self.current_fact.id)
|
def _load_from_ini_py2(ini):
    """Load settings from a single ini file under Python 2.

    :param ini: path of the ini file to read
    :return: OrderedDict of settings; DEFAULT-section keys are uppercased
        and value-converted, each regular section becomes a nested
        OrderedDict of raw option values.
    """
    logger.debug('使用PY2不支持自定义default_section,其默认值是:%s' % _DEFAULT_SECTION)
    parser = configparser.ConfigParser()
    parser.read(ini)
    settings = OrderedDict()
    # Defaults first: keys uppercased, values coerced via convert_value.
    for key, raw in parser.defaults().items():
        settings[key.upper()] = convert_value(raw)
    # Clear the defaults so they don't leak into every section below.
    parser._defaults = {}
    for section in parser.sections():
        settings[section] = OrderedDict(parser.items(section))
    return settings
|
def atlasdb_renew_peer(peer_hostport, now, con=None, path=None):
    """Renew a peer's discovery time.

    Always returns True.
    """
    with AtlasDBOpen(con=con, path=path) as dbcon:
        # Default to the current wall-clock time.
        if now is None:
            now = time.time()
        cur = dbcon.cursor()
        atlasdb_query_execute(
            cur,
            "UPDATE peers SET discovery_time = ? WHERE peer_hostport = ?;",
            (now, peer_hostport))
        dbcon.commit()
        return True
|
def ask_string(message='Enter something.', default='', title=''):
    """Show a box in which a user can enter some text.

    You may optionally specify some default text, which will appear in the
    entry-box when it is displayed.

    Returns the text that the user entered, or None if he cancels the
    operation.

    :param message: message to be displayed.
    :param title: window title
    :param default: entry-box default string
    :rtype: None or string
    """
    options = dict(message=message, default=default, title=title)
    return backend_api.opendialog("ask_string", options)
|
def _ensure_parameters(self):
    """Attempts to load and verify the CTE node parameters.

    Will use default values for all missing parameters, and raise an
    exception if a parameter's value cannot be verified. This method will
    only perform these actions once, and set the
    :attr:`_parameters_checked` attribute to ``True`` upon its first
    success.

    Raises:
        ImproperlyConfigured: if the parent foreign key, tree traversal
            order, or delete method cannot be validated.
    """
    # Run-once guard.
    if hasattr(self, "_parameters_checked"):
        return
    # Fill in defaults for any unset (or explicitly None) parameters.
    if (not hasattr(self.model, "_cte_node_table")
            or self.model._cte_node_table is None):
        setattr(self.model, "_cte_node_table", self.DEFAULT_TABLE_NAME)
    if (not hasattr(self.model, "_cte_node_depth")
            or self.model._cte_node_depth is None):
        setattr(self.model, "_cte_node_depth", self.VIRTUAL_FIELD_DEPTH)
    # BUG FIX: this condition previously re-tested ``_cte_node_depth``
    # (copy/paste error), so a model declaring ``_cte_node_path = None``
    # never received the default path field name.
    if (not hasattr(self.model, "_cte_node_path")
            or self.model._cte_node_path is None):
        setattr(self.model, "_cte_node_path", self.VIRTUAL_FIELD_PATH)
    if (not hasattr(self.model, "_cte_node_ordering")
            or self.model._cte_node_ordering is None):
        setattr(self.model, "_cte_node_ordering", self.VIRTUAL_FIELD_ORDERING)
    if (not hasattr(self.model, "_cte_node_traversal")
            or self.model._cte_node_traversal is None):
        setattr(self.model, "_cte_node_traversal", self.DEFAULT_TREE_TRAVERSAL)
    if (not hasattr(self.model, "_cte_node_children")
            or self.model._cte_node_children is None):
        setattr(self.model, "_cte_node_children", self.DEFAULT_CHILDREN_NAME)
    if not hasattr(self.model, "_cte_node_primary_key_type"):
        setattr(self.model, "_cte_node_primary_key_type", None)
    # Determine the parent foreign key field name, either
    # explicitly specified, or the first foreign key to 'self'.
    # If we need to determine, then we set the attribute for future
    # reference.
    if (not hasattr(self.model, "_cte_node_parent")
            or self.model._cte_node_parent is None):
        found = False
        for f in self.model._meta.fields:
            if isinstance(f, ForeignKey) and f.remote_field.model == self.model:
                # NOTE: no break, so the *last* self-FK wins — kept to
                # preserve the historical behaviour for models with
                # several self foreign keys.
                setattr(self.model, "_cte_node_parent", f.name)
                found = True
        if not found:
            raise ImproperlyConfigured(
                _("CTENode must have a Foreign Key to self for the parent "
                  "relation."))
    try:
        parent_field = self.model._meta.get_field(self.model._cte_node_parent)
    except FieldDoesNotExist:
        raise ImproperlyConfigured(
            "".join([
                _("CTENode._cte_node_parent must specify a Foreign Key"
                  " to self, instead it is: "),
                self.model._cte_node_parent,
            ]))
    # Ensure parent relation is a Foreign Key to self.
    if not parent_field.remote_field.model == self.model:
        raise ImproperlyConfigured(
            "".join([
                _("CTENode._cte_node_parent must specify a Foreign Key"
                  " to self, instead it is: "),
                self.model._cte_node_parent,
            ]))
    # Record the parent field attribute name for future reference.
    setattr(self.model, "_cte_node_parent_attname",
            self.model._meta.get_field(self.model._cte_node_parent).attname)
    # Ensure traversal choice is valid.
    traversal_choices = [choice[0] for choice in self.TREE_TRAVERSAL_CHOICES]
    if self.model._cte_node_traversal not in traversal_choices:
        raise ImproperlyConfigured(
            " ".join([
                "CTENode._cte_node_traversal must be one of [",
                ", ".join(traversal_choices),
                "]; instead it is:",
                self.model._cte_node_traversal,
            ]))
    # Ensure delete choice is valid.
    if (not hasattr(self.model, "_cte_node_delete_method")
            or self.model._cte_node_delete_method is None):
        setattr(self.model, "_cte_node_delete_method", self.DEFAULT_DELETE_METHOD)
    else:
        # Ensure specified method is valid.
        method_choices = [dm[0] for dm in self.DELETE_METHOD_CHOICES]
        if self.model._cte_node_delete_method not in method_choices:
            raise ImproperlyConfigured(
                " ".join([
                    "delete method must be one of [",
                    ", ".join(method_choices),
                    "]; instead it is:",
                    self.model._cte_node_delete_method,
                ]))
    setattr(self, "_parameters_checked", True)
|
def build_maps(self, losses, clp, stats=()):
    """:param losses: an array of shape (A, R, P)
    :param clp: a list of C conditional loss poes
    :param stats: list of pairs [(statname, statfunc), ...]
    :returns: an array of loss_maps of shape (A, R, C, LI)
    """
    # One slot per asset, realization, conditional poe and loss type.
    shape = losses.shape[:2] + (len(clp), len(losses.dtype))  # (A, R, C, LI)
    loss_maps = numpy.zeros(shape, F32)
    for lti, loss_type in enumerate(losses.dtype.names):
        for a, per_rlz in enumerate(losses[loss_type]):
            for r, curve_losses in enumerate(per_rlz):
                for c, poe in enumerate(clp):
                    loss_maps[a, r, c, lti] = conditional_loss_ratio(
                        curve_losses, self.poes, poe)
    return self.pair(loss_maps, stats)
|
def check(self, state, *args, **kwargs):
    """Check if this engine can be used for execution on the current state.

    A callback `check_failure` is called upon failed checks. Note that the
    execution can still fail even if check() returns True.

    You should only override this method in a subclass in order to provide
    the correct method signature and docstring. You should override the
    ``_check`` method to do your actual execution.

    :param SimState state: The state with which to execute.
    :param args: Positional arguments that will be passed to process().
    :param kwargs: Keyword arguments that will be passed to process().
    :return: True if the state can be handled by the current engine,
        False otherwise.
    """
    # Thin public wrapper: subclasses implement _check for the real logic.
    result = self._check(state, *args, **kwargs)
    return result
|
def prepend(self, symbol, metadata, start_time=None):
    """Prepend a metadata entry for `symbol`

    Parameters
    ----------
    symbol : `str`
        symbol name for the item
    metadata : `dict`
        to be persisted
    start_time : `datetime.datetime`
        when metadata becomes effective
        Default: datetime.datetime.min

    Returns
    -------
    The (possibly updated) earliest metadata document, a newly inserted
    document, or None when `metadata` is None.
    """
    # Nothing to persist.
    if metadata is None:
        return
    if start_time is None:
        start_time = dt.min
    # Earliest existing entry for this symbol (ascending start_time sort).
    old_metadata = self.find_one({'symbol': symbol}, sort=[('start_time', pymongo.ASCENDING)])
    if old_metadata is not None:
        # A prepend must come strictly before the earliest existing entry.
        if old_metadata['start_time'] <= start_time:
            raise ValueError('start_time={} is later than the first metadata @{}'.format(start_time, old_metadata['start_time']))
        if old_metadata['metadata'] == metadata:
            # Same payload: just extend the earliest entry backwards in time
            # instead of inserting a duplicate document.
            self.find_one_and_update({'symbol': symbol}, {'$set': {'start_time': start_time}}, sort=[('start_time', pymongo.ASCENDING)])
            old_metadata['start_time'] = start_time
            return old_metadata
        # Different payload: the new entry ends where the old one begins.
        end_time = old_metadata.get('start_time')
    else:
        end_time = None
    document = {'_id': bson.ObjectId(), 'symbol': symbol, 'metadata': metadata, 'start_time': start_time}
    if end_time is not None:
        document['end_time'] = end_time
    # NOTE(review): mongo_retry presumably retries on transient Mongo
    # errors — defined elsewhere in the package; confirm semantics.
    mongo_retry(self.insert_one)(document)
    logger.debug('Finished writing metadata for %s', symbol)
    return document
|
def curve4_bezier(p1, p2, p3, p4):
    """Generate the vertices for a third order Bezier curve.

    The vertices returned by this function can be passed to a LineVisual
    or ArrowVisual.

    Parameters
    ----------
    p1 : array
        2D coordinates of the start point
    p2 : array
        2D coordinates of the first curve point
    p3 : array
        2D coordinates of the second curve point
    p4 : array
        2D coordinates of the end point

    Returns
    -------
    coords : list
        Vertices for the Bezier curve.

    See Also
    --------
    curve3_bezier

    Notes
    -----
    For more information about Bezier curves please refer to the
    `Wikipedia`_ page.

    .. _Wikipedia: https://en.wikipedia.org/wiki/B%C3%A9zier_curve
    """
    start_x, start_y = p1
    ctrl1_x, ctrl1_y = p2
    ctrl2_x, ctrl2_y = p3
    end_x, end_y = p4
    vertices = []
    _curve4_recursive_bezier(vertices, start_x, start_y, ctrl1_x, ctrl1_y,
                             ctrl2_x, ctrl2_y, end_x, end_y)
    # The recursive subdivision may stop slightly short of the endpoints;
    # prepend/append the exact ones when they are not already present.
    dx, dy = vertices[0][0] - start_x, vertices[0][1] - start_y
    if (dx * dx + dy * dy) > 1e-10:
        vertices.insert(0, (start_x, start_y))
    dx, dy = vertices[-1][0] - end_x, vertices[-1][1] - end_y
    if (dx * dx + dy * dy) > 1e-10:
        vertices.append((end_x, end_y))
    return np.array(vertices).reshape(len(vertices), 2)
|
def bbox(self):
    """Bounding box as minimum and maximum coordinates.

    Per-dimension minima followed by maxima, e.g. (xmin, ymin, xmax, ymax)
    for 2D coordinates.
    """
    return concatenate((amin(self.coordinates, axis=0),
                        amax(self.coordinates, axis=0)))
|
def place_object(self, object, column=None, row=None, column_span=1, row_span=1, alignment=1):
    """This adds either one of our simplified objects or a QWidget to the
    grid at the specified position, appends the object to self.objects.

    alignment=0    Fill the space.
    alignment=1    Left-justified.
    alignment=2    Right-justified.

    If column isn't specified, the new object will be placed in a new
    column.
    """
    # Pick a column ('is None' instead of the old '== None' comparison).
    if column is None:
        column = self._auto_column
        self._auto_column += 1
    # Pick a row.
    if row is None:
        row = self._auto_row
    self.objects.append(object)
    # Wrapped objects expose the underlying Qt widget via ._widget;
    # a plain QWidget is used as-is (replaces the old probe-then-read
    # double attribute access under a bare except).
    widget = getattr(object, '_widget', object)
    self._layout.addWidget(widget, row, column, row_span, column_span,
                           _g.Qt.QtCore.Qt.Alignment(alignment))
    # Best effort: store the parent (self) in the placed object when it
    # supports it; failures are deliberately ignored as before, but the
    # bare 'except:' is narrowed so KeyboardInterrupt etc. still escape.
    try:
        object.set_parent(self)
    except Exception:
        pass
    return object
|
def rpc_get_num_names(self, **con_info):
    """Get the number of names that exist and are not expired.

    Return {'status': True, 'count': count} on success
    Return {'error': ...} on error
    """
    db = get_db_state(self.working_dir)
    try:
        # Previously the handle leaked if get_num_names() raised;
        # always close the DB state.
        num_names = db.get_num_names()
    finally:
        db.close()
    return self.success_response({'count': num_names})
|
def wait_until_element_present(self, element, timeout=None):
    """Search element and wait until it is found.

    :param element: PageElement or element locator as a tuple
        (locator_type, locator_value) to be found
    :param timeout: max time to wait
    :returns: the web element if it is present
    :rtype: selenium.webdriver.remote.webelement.WebElement or
        appium.webdriver.webelement.WebElement
    :raises TimeoutException: If the element is not found after the timeout
    """
    condition = self._expected_condition_find_element
    return self._wait_until(condition, element, timeout)
|
def oregontrail(channel, nick, rest):
    """It's edutainment!"""
    subject = rest.strip()
    if not subject:
        # No explicit target: pick the caller, the channel or the bot.
        subject = random.choice([nick, channel, 'pmxbot'])
    action = random.choice(phrases.otrail_actions)
    # Affliction/death actions get a cause appended.
    if action in ('has', 'has died from'):
        cause = random.choice(phrases.otrail_issues)
        return '%s %s %s.' % (subject, action, cause)
    return '%s %s' % (subject, action)
|
def collect_api_results(input_data, url, headers, api, batch_size, kwargs):
    """Optionally split up a single request into a series of requests
    to ensure timely HTTP responses.

    Could eventually speed up the time required to receive a response by
    sending batches to the indico API concurrently.

    :param input_data: sequence of inputs to process
    :param batch_size: when truthy, split input_data into batches of this
        size; otherwise send everything in one request
    :raises BatchProcessingError: when a batch fails; partial results are
        dumped to a timestamped JSON file first
    """
    if not batch_size:
        return send_request(input_data, api, url, headers, kwargs)
    results = []
    for batch in batched(input_data, size=batch_size):
        try:
            result = send_request(batch, api, url, headers, kwargs)
            if isinstance(result, list):
                results.extend(result)
            else:
                results.append(result)
        except IndicoError as e:
            # Log results so far to file
            timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
            filename = "indico-{api}-{timestamp}.json".format(api=api, timestamp=timestamp)
            if sys.version_info > (3, 0):
                json.dump(results, open(filename, mode='w', encoding='utf-8'), cls=NumpyEncoder)
            else:
                json.dump(results, open(filename, mode='w'), cls=NumpyEncoder)
            # BUG FIX: the message had lost its '{filename}' placeholder
            # (it read a literal "(unknown)"), so the saved-results path
            # passed to .format() was never shown to the user.
            raise BatchProcessingError(
                "The following error occurred while processing your data: `{err}` "
                "Partial results have been saved to {filename}".format(
                    err=e, filename=os.path.abspath(filename)))
    return results
|
def _create_sata_controllers(sata_controllers):
    '''Returns a list of vim.vm.device.VirtualDeviceSpec objects
    representing SATA controllers

    sata_controllers
        SATA properties
    '''
    if not sata_controllers:
        return []
    log.trace('Creating SATA controllers %s',
              [sata['adapter'] for sata in sata_controllers])
    # Temporary negative device keys; vCenter assigns real ones on apply.
    keys = range(-15000, -15050, -1)
    return [
        _apply_sata_controller_config(sata['adapter'], 'add', key, sata['bus_number'])
        for sata, key in zip(sata_controllers, keys)
    ]
|
def _update(self, data):
    '''Update the line using the blob of json-parsed data directly from
    the API.'''
    self.bullet, self.level = data['bullet'], data['level']
    # Keep both the raw wikitext and its rendered form.
    self.text = WikiText(data['text_raw'], data['text_rendered'])
|
def _process_ddg2p_annotations(self, limit):
    """The ddg2p annotations associate a gene symbol to an omim disease,
    along with some HPO ids and pubs. The gene symbols come from gencode,
    which in turn come from HGNC official gene symbols. Therefore,
    we use the HGNC source class to get the id/symbol mapping for
    use in our annotations here.

    According to http://www.gencodegenes.org/faq.html,
    "Gene names are usually HGNC or MGI-approved gene symbols mapped
    to the GENCODE genes by the Ensembl xref pipeline. Sometimes,
    when there is no official gene symbol, the Havana clone-based
    name is used."

    The kind of variation that is linked to a disease is indicated
    (LOF, GOF, CNV, etc) in the source data.
    Here, we create an anonymous variant of the specified gene of
    the indicated type (mapped to the sequence ontology (SO)).

    :param limit: int or None; stop after this many data lines
        (ignored in test mode)
    :return: None
    """
    line_counter = 0
    # NOTE(review): both branches assign the same value; the else branch
    # presumably was meant to select a test graph — confirm intent.
    if self.graph is not None:
        graph = self.graph
    else:
        graph = self.graph
    # in order for this to work, we need to map the HGNC id-symbol;
    hgnc = HGNC(self.graph_type, self.are_bnodes_skolemized)
    hgnc_symbol_id_map = hgnc.get_symbol_id_map()
    myzip = ZipFile('/'.join((self.rawdir, self.files['annot']['file'])), 'r')
    # use the ddg2p.txt file
    fname = 'ddg2p.txt'
    unmapped_omim_counter = 0
    unmapped_gene_count = 0
    with myzip.open(fname, 'r') as f:
        f = io.TextIOWrapper(f)
        reader = csv.reader(f, delimiter='\t', quotechar='\"')
        for row in reader:
            line_counter += 1
            if re.match(r'#', row[0]):  # skip comments
                continue
            # Expected tab-separated columns of ddg2p.txt.
            (gencode_gene_name, mode, category, consequence, disease, omim, ddg2p_id, pubmed_ids, hpo_codes) = row
            hgnc_id = hgnc_symbol_id_map.get(gencode_gene_name.strip())
            if hgnc_id is None:
                # Cannot annotate without an HGNC id; count and move on.
                LOG.error("Couldn't map the gene symbol %s to HGNC.", gencode_gene_name)
                unmapped_gene_count += 1
                continue
            # add the gene
            self.model.addClassToGraph(hgnc_id, gencode_gene_name)
            # TODO make VSLC with the variation
            # to associate with the disorder
            # TODO use the Inheritance and Mutation consequence
            # to classify the VSLCs
            allele_id = self.make_allele_by_consequence(consequence, hgnc_id, gencode_gene_name)
            if omim.strip() != '':
                omim_id = 'OMIM:' + str(omim.strip())
                # assume this is declared elsewhere in ontology
                self.model.addClassToGraph(omim_id, None)
                # TODO: 'category' (Confirmed/Probable/Possible/Not DD
                # gene) could select the association relation ('has
                # phenotype' / 'contributes to'); currently unused.
                assoc = G2PAssoc(graph, self.name, allele_id, omim_id)
                # Attach every referenced PubMed id as an assoc source.
                for p in re.split(r';', pubmed_ids):
                    p = p.strip()
                    if p != '':
                        pmid = 'PMID:' + str(p)
                        r = Reference(graph, pmid, self.globaltt['journal article'])
                        r.addRefToGraph()
                        assoc.add_source(pmid)
                assoc.add_association_to_graph()
            else:
                # these are unmapped to a disease id.
                # note that some match OMIM disease labels
                # but the identifiers are just not included.
                # TODO consider mapping to OMIM or DOIDs in other ways
                LOG.warning("No omim id on line %d\n%s", line_counter, str(row))
                unmapped_omim_counter += 1
            # TODO hpo phenotypes
            # since the DDG2P file is not documented,
            # it is unclear whether the HPO annotations describe the gene
            # or the omim disease, so no associations are created yet.
            if not self.test_mode and limit is not None and line_counter > limit:
                break
    myzip.close()
    LOG.warning("gene-disorder associations with no omim id: %d", unmapped_omim_counter)
    LOG.warning("unmapped gene count: %d", unmapped_gene_count)
    return
|
def export(self, name, columns, points):
    """Write the points in Riemann.

    :param name: metric/service name prefix
    :param columns: sequence of column names
    :param points: sequence of values, parallel to `columns`
    """
    # Idiom fix: iterate the pairs directly instead of range(len(...));
    # also drops the redundant continue/else structure.
    for column, point in zip(columns, points):
        # Riemann metrics must be numeric; skip anything else.
        if not isinstance(point, Number):
            continue
        data = {'host': self.hostname,
                'service': name + " " + column,
                'metric': point}
        logger.debug(data)
        try:
            self.client.send(data)
        except Exception as e:
            logger.error("Cannot export stats to Riemann (%s)" % e)
|
def update_dvportgroup(portgroup_ref, spec):
    '''Updates a distributed virtual portgroup

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # BUG FIX: log message typo 'portgrouo' -> 'portgroup'.
    log.trace('Updating portgroup %s', pg_name)
    # Translate pyVmomi faults into salt exceptions, most specific first.
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
|
def set_attribute(self, code, value):
    """Set attribute for user."""
    # get_or_create returns (instance, created); only the instance matters.
    attribute, _created = self.get_or_create(code=code)
    attribute.value = value
    attribute.save()
|
def _print ( self , method , * args , ** kwargs ) :
"""Output format affects integration tests .
@ see : IntegrationTests . mock _ output"""
|
sess_method = getattr ( self . _session , method )
try :
headers = kwargs [ 'headers' ]
except KeyError :
headers = { }
tpl = '[%s] %s %s'
print ( tpl % ( method , args [ 0 ] , headers ) , end = ' ' )
try :
r = sess_method ( * args , ** kwargs )
except :
e = sys . exc_info ( )
e_str = "%s: %s" % ( e [ 0 ] , e [ 1 ] )
print ( "FAILED (%s)" % e_str )
raise
if method == "get" and r . status_code == 200 :
hsh = hashlib . md5 ( r . content ) . hexdigest ( )
else :
hsh = ""
print ( r . status_code , hsh )
return r
|
def join(self):
    """Wait for all task to finish

    Old-style (pre async/await) coroutine: drains self._tasks while
    keeping at most self._concurrency of them in flight. If any task
    raised, one of the collected exceptions is re-raised at the end.
    """
    pending = set()
    exceptions = set()
    while len(self._tasks) > 0 or len(pending) > 0:
        # Top up the in-flight set without exceeding the concurrency limit.
        while len(self._tasks) > 0 and len(pending) < self._concurrency:
            task, args, kwargs = self._tasks.pop(0)
            pending.add(task(*args, **kwargs))
        # NOTE(review): passing raw coroutines to asyncio.wait is
        # deprecated since Python 3.8 and rejected on recent versions —
        # confirm the target runtime or wrap with asyncio.ensure_future.
        (done, pending) = yield from asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
        for task in done:
            if task.exception():
                exceptions.add(task.exception())
    if len(exceptions) > 0:
        # Re-raises an arbitrary one of the collected exceptions.
        raise exceptions.pop()
|
def to_query_parameters_dict(parameters):
    """Converts a dictionary of parameter values into query parameters.

    :type parameters: Mapping[str, Any]
    :param parameters: Dictionary of query parameter values.

    :rtype: List[google.cloud.bigquery.query._AbstractQueryParameter]
    :returns: A list of named query parameters.
    """
    # Each (name, value) pair becomes one named scalar parameter.
    return [
        scalar_to_query_parameter(value, name=name)
        for name, value in six.iteritems(parameters)
    ]
|
def control(self, key):
    """Send a control command (remote key click) over the connection."""
    if not self.connection:
        raise exceptions.ConnectionClosed()
    command = {
        "method": "ms.remote.control",
        "params": {
            "Cmd": "Click",
            "DataOfCmd": key,
            "Option": "false",
            "TypeOfRemote": "SendRemoteKey",
        },
    }
    logging.info("Sending control command: %s", key)
    self.connection.send(json.dumps(command))
    # Throttle successive key presses.
    time.sleep(self._key_interval)
|
def list_datastores(kwargs=None, call=None):
    '''List all the datastores for this VMware environment

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_datastores my-vmware-config
    '''
    # Only callable as a salt-cloud function (-f / --function).
    if call != 'function':
        raise SaltCloudSystemExit(
            'The list_datastores function must be called with '
            '-f or --function.')
    datastores = salt.utils.vmware.list_datastores(_get_si())
    return {'Datastores': datastores}
|
def distance(p1, p2):
    """Cartesian distance between two PoseStamped or PoseLists.

    :param p1: point 1 (list, Pose or PoseStamped)
    :param p2: point 2 (list, Pose or PoseStamped)
    :return: cartesian distance (float)
    """
    def xyz(some_pose):
        # Accept PoseStamped, Pose, [[x, y, z], ...] or [x, y, z].
        if isinstance(some_pose, PoseStamped):
            position = some_pose.pose.position
            return position.x, position.y, position.z
        if isinstance(some_pose, Pose):
            position = some_pose.position
            return position.x, position.y, position.z
        if _is_indexable(some_pose[0]):
            return some_pose[0][0], some_pose[0][1], some_pose[0][2]
        return some_pose[0], some_pose[1], some_pose[2]

    dx, dy, dz = (a - b for a, b in zip(xyz(p1), xyz(p2)))
    return sqrt(dx * dx + dy * dy + dz * dz)
|
def export_to_txt(table, filename_or_fobj=None, encoding=None, frame_style="ASCII", safe_none_frame=True, *args, **kwargs):
    """Export a `rows.Table` to text.

    This function can return the result as a string or save into a file
    (via filename or file-like object).

    :param table: the `rows.Table` to render.
    :param filename_or_fobj: destination; if None, the rendered text is
        returned instead of written.
    :param encoding: may be None if no filename/file-like object is
        specified, in which case the return type is `six.text_type`;
        otherwise the rendered text is encoded with it before writing.
    :param frame_style: frame drawn around the data. Valid values are
        'None', 'ASCII', 'single', 'double' — ASCII is the default.
        Warning: no checks are made that the desired encoding supports
        the characters needed by single and double frame styles.
    :param safe_none_frame: bool, defaults to True. Affects only output
        with frame_style == "None": column titles are left-aligned and
        have whitespace replaced by "_", keeping the output parseable.
        Otherwise the table looks prettier but cannot be imported back.
    """
    # TODO: will work only if table.fields is OrderedDict
    frame_style = _parse_frame_style(frame_style)
    frame = FRAMES[frame_style.lower()]
    serialized_table = serialize(table, *args, **kwargs)
    # the first yielded row of the serializer is the header
    field_names = next(serialized_table)
    table_rows = list(serialized_table)
    max_sizes = _max_column_sizes(field_names, table_rows)
    # one dashed segment per column; +2 accounts for the padding spaces
    dashes = [frame["HORIZONTAL"] * (max_sizes[field] + 2) for field in field_names]
    if frame_style != "None" or not safe_none_frame:
        header = [field.center(max_sizes[field]) for field in field_names]
    else:
        # frameless + safe: parseable header (left-aligned, underscores for spaces)
        header = [field.replace(" ", "_").ljust(max_sizes[field]) for field in field_names]
    header = "{0} {1} {0}".format(frame["VERTICAL"], " {} ".format(frame["VERTICAL"]).join(header))
    top_split_line = (frame["DOWN AND RIGHT"] + frame["DOWN AND HORIZONTAL"].join(dashes) + frame["DOWN AND LEFT"])
    body_split_line = (frame["VERTICAL AND RIGHT"] + frame["VERTICAL AND HORIZONTAL"].join(dashes) + frame["VERTICAL AND LEFT"])
    botton_split_line = (frame["UP AND RIGHT"] + frame["UP AND HORIZONTAL"].join(dashes) + frame["UP AND LEFT"])
    result = []
    if frame_style != "None":
        result += [top_split_line]
    result += [header, body_split_line]
    for row in table_rows:
        values = [value.rjust(max_sizes[field_name]) for field_name, value in zip(field_names, row)]
        row_data = " {} ".format(frame["VERTICAL"]).join(values)
        result.append("{0} {1} {0}".format(frame["VERTICAL"], row_data))
    if frame_style != "None":
        result.append(botton_split_line)
    # empty final element so the join ends with a trailing newline
    result.append("")
    data = "\n".join(result)
    if encoding is not None:
        data = data.encode(encoding)
    return export_data(filename_or_fobj, data, mode="wb")
|
def SetupPrometheusEndpointOnPort(port, addr=''):
    """Export Prometheus metrics on an HTTPServer running in its own thread.

    The server runs on ``port`` and by default listens on all interfaces.
    It is fully independent of Django's stack, so metrics keep being
    served even if Django itself stops responding — at the cost of not
    being able to use Django features such as middlewares or WSGI.

    When Django runs with its auto-reloader (the default; disable with
    ``manage.py runserver --noreload``), manage.py is forked and executed
    twice, which would try to open the port twice. The reloader's child
    process sets the RUN_MAIN environment variable, so we refuse to
    start in that configuration.
    """
    running_under_autoreloader = os.environ.get('RUN_MAIN') == 'true'
    assert not running_under_autoreloader, (
        'The thread-based exporter can\'t be safely used when django\'s '
        'autoreloader is active. Use the URL exporter, or start django '
        'with --noreload. See documentation/exports.md.'
    )
    prometheus_client.start_http_server(port, addr=addr)
|
def control_gate(control: Qubit, gate: Gate) -> Gate:
    """Return a controlled unitary gate.

    Given a gate acting on K qubits, return a new gate on K+1 qubits
    prepended with a control bit.
    """
    if control in gate.qubits:
        raise ValueError('Gate and control qubits overlap')
    # |0><0| (x) I  +  |1><1| (x) U
    identity_branch = join_gates(P0(control), identity_gate(gate.qubits))
    active_branch = join_gates(P1(control), gate)
    combined_tensor = identity_branch.tensor + active_branch.tensor
    return Gate(qubits=[control, *gate.qubits], tensor=combined_tensor)
|
def unregister(self, measurement_class, callback):
    """Stop notifying ``callback`` of new values of ``measurement_class``.

    If the callback wasn't previously registered, this method will have
    no effect.

    :param measurement_class: the Measurement subclass the callback was
        registered for.
    :param callback: the callable to remove.
    """
    name = Measurement.name_from_class(measurement_class)
    try:
        self.callbacks[name].remove(callback)
    except (KeyError, ValueError):
        # Bug fix: a bare list.remove raised ValueError for callbacks
        # that were never registered, contradicting the documented
        # no-effect contract. KeyError covers a measurement name with
        # no callback entry at all (in case callbacks is a plain dict).
        pass
|
def copy(self):
    """Make a copy of the SegmentList.

    :return: A copy of the SegmentList instance.
    :rtype: angr.analyses.cfg_fast.SegmentList
    """
    duplicate = SegmentList()
    # deep-copy each segment so mutations don't leak between instances
    duplicate._list = [segment.copy() for segment in self._list]
    duplicate._bytes_occupied = self._bytes_occupied
    return duplicate
|
def _init_glyph(self, plot, mapping, properties):
    """Create a Bokeh Span glyph, attach it to the plot, and return it."""
    render_level = properties.get('level', 'glyph')
    span = Span(level=render_level, **mapping)
    plot.renderers.append(span)
    # no separate data source is used for a Span annotation
    return None, span
|
def _create_sync_map(self, sync_root):
    """Build the sync map from ``sync_root`` and attach it to the Task.

    If safety checks are enabled in the runtime configuration, verify
    that the computed sync map is consistent first.
    """
    computed_map = SyncMap(tree=sync_root, rconf=self.rconf, logger=self.logger)
    if not self.rconf.safety_checks:
        self.log(u"Not running sanity check on computed sync map")
    else:
        self.log(u"Running sanity check on computed sync map...")
        if not computed_map.leaves_are_consistent:
            self._step_failure(ValueError(u"The computed sync map contains inconsistent fragments"))
        self.log(u"Running sanity check on computed sync map... passed")
    self.task.sync_map = computed_map
|
def joinStringsInList(literalEntities, prefLanguage="en"):
    """From a list of literals, return the ones in ``prefLanguage`` joined up.

    If the desired language specification is not available, join all of
    them up instead. A single-element list returns that element
    unchanged; an empty list returns None (unchanged legacy behavior).

    :param literalEntities: list of literal-like objects (e.g. rdflib
        Literals); items may or may not expose a ``language`` attribute.
    :param prefLanguage: preferred language tag (default "en").
    """
    if len(literalEntities) == 1:
        return literalEntities[0]
    if len(literalEntities) > 1:
        match = []
        for entity in literalEntities:
            # Bug fix: getattr without a default raised AttributeError
            # for literals that have no `language` attribute at all.
            language = getattr(entity, 'language', None)
            if language and language == prefLanguage:
                match.append(entity)
        if not match:  # don't bother about language
            match = list(literalEntities)
        return " - ".join(match)
|
def _ctypes_splice ( parameter ) :
"""Returns a list of variable names that define the size of each dimension ."""
|
params = parameter . ctypes_parameter ( )
if parameter . direction == "(inout)" and ( "allocatable" in parameter . modifiers or "pointer" in parameter . modifiers ) :
return ', ' . join ( params [ 1 : - 1 ] )
else :
return ', ' . join ( params [ 1 : : ] )
|
def index(self, axes):
    """Return the position of ``axes`` among the managed axes.

    :param axes: The Axes instance to find the index of.
    :type axes: Axes
    :rtype: int or None (None for the dedicated colormap axes)
    """
    if axes is self._colormap_axes:
        return None
    return self._axes.index(axes)
|
def decorate_callable(self, target):
    """Apply the configured patchers to ``target`` (called as a decorator).

    Unless mocks should be passed through (or ``target`` is a class),
    the target is wrapped so that injected mock arguments are absorbed
    and only the test case instance reaches the original callable.
    """
    # noinspection PyUnusedLocal
    def absorb_mocks(test_case, *args):
        return target(test_case)

    if self.pass_mocks or isinstance(target, type):
        decorated = target
    else:
        decorated = absorb_mocks
    for patcher in self.patchers:
        decorated = patcher(decorated)
    return decorated
|
async def container_dump(self, container, container_type, params=None, obj=None):
    """Dumps container of elements to the writer.

    :param container: iterable of elements to serialize; may be None.
    :param container_type: declared container type, used to resolve the
        element type via ``x.container_elem_type``.
    :param params: optional type parameters; ``params[1:]`` is forwarded
        to per-element serialization.
    :param obj: optional accumulator; reused when it already holds an
        element (per ``x.has_elem``), otherwise a fresh list is created.
    :return: the accumulated list (wrapped in ``ArrayModel`` when
        ``self.modelize`` is set), or ``NoSetSentinel()`` when
        ``container`` is None.
    """
    elem_type = x.container_elem_type(container_type, params)
    obj = [] if not x.has_elem(obj) else x.get_elem(obj)
    # todo: pod container, just concat blobs/serialized content together. loading = size/elem size...
    if container is None:  # todo: reconsider
        return NoSetSentinel()
    # if not self.modelize else ArrayModel(obj, xmr_type_to_type(elem_type))
    for idx, elem in enumerate(container):
        try:
            # track the element index so serialization failures can
            # report exactly where in the container they occurred;
            # on error the index deliberately stays pushed for the
            # ArchiveException's tracker snapshot
            self.tracker.push_index(idx)
            fvalue = await self._dump_field(elem, elem_type, params[1:] if params else None)
            self.tracker.pop()
        except Exception as e:
            raise helpers.ArchiveException(e, tracker=self.tracker) from e
        # elements that serialize to NoSetSentinel are skipped entirely
        if not isinstance(fvalue, NoSetSentinel):
            obj.append(fvalue)
    return obj if not self.modelize else ArrayModel(obj, xmr_type_to_type(elem_type))
|
def H6(self):
    "Sum average (computed lazily and cached on first access)."
    if not hasattr(self, '_H6'):
        # weight each (i+j) level (shifted by 2) by its p_{x+y} probability,
        # then sum along axis 1
        shifted_levels = self.rlevels2 + 2
        self._H6 = (shifted_levels * self.p_xplusy).sum(1)
    return self._H6
|
def databases(self):
    """List of databases available from eutils (per einfo query).

    The result is fetched once and cached on the instance.
    """
    try:
        return self._databases
    except AttributeError:
        # first access: populate the cache from an einfo query
        pass
    self._databases = self.einfo().databases
    return self._databases
|
def send_email(self, recipients, subject, body, attachments=None):
    """Prepare and send email to the recipients.

    :param recipients: a list of email or name,email strings
    :param subject: the email subject
    :param body: the email body
    :param attachments: optional list of email attachments
    :returns: True if all emails were sent, else False
    """
    # Bug fix: this was a `map(...)` iterator, which was exhausted while
    # rendering the template context below — so the send loop iterated
    # nothing and the function returned True without sending any mail.
    recipient_pairs = [self.parse_email(recipient) for recipient in recipients]
    template_context = {"recipients": "\n".join(map(lambda p: formataddr(p), recipient_pairs))}
    body_template = Template(safe_unicode(body)).safe_substitute(**template_context)
    _preamble = "This is a multi-part message in MIME format.\n"
    _from = formataddr((self.email_from_name, self.email_from_address))
    _subject = Header(s=safe_unicode(subject), charset="utf8")
    _body = MIMEText(body_template, _subtype="plain", _charset="utf8")
    # Create the enclosing message
    mime_msg = MIMEMultipart()
    mime_msg.preamble = _preamble
    mime_msg["Subject"] = _subject
    mime_msg["From"] = _from
    mime_msg.attach(_body)
    # Attach attachments. Bug fix: guard against the default of None,
    # which previously raised TypeError when no attachments were passed.
    for attachment in attachments or []:
        mime_msg.attach(attachment)
    success = []
    # Send one email per recipient
    for pair in recipient_pairs:
        # N.B.: headers are added additively, so we need to remove any
        # existing "To" headers first.
        # No KeyError is raised if the key does not exist.
        # https://docs.python.org/2/library/email.message.html#email.message.Message.__delitem__
        del mime_msg["To"]
        # N.B. we use just the email here to prevent this Postfix error:
        # Recipient address rejected: User unknown in local recipient table
        mime_msg["To"] = pair[1]
        msg_string = mime_msg.as_string()
        sent = self.send(msg_string)
        if not sent:
            logger.error("Could not send email to {}".format(pair))
        success.append(sent)
    return all(success)
|
def get_bookmarks(self, folder='unread', limit=25, have=None):
    """Return list of user's bookmarks.

    :param str folder: Optional. Possible values are unread (default),
        starred, archive, or a folder_id value.
    :param int limit: Optional. A number between 1 and 500, default 25.
    :param list have: Optional. A list of IDs to exclude from results.
    :returns: List of user's bookmarks
    :rtype: list
    """
    params = {'folder_id': folder, 'limit': limit}
    if have:
        # the API expects a comma-separated string of IDs
        params['have'] = ','.join(str(id_) for id_ in have)
    response = self.request('bookmarks/list', params)
    bookmarks = []
    for item in response['data']:
        item_type = item.get('type')
        if item_type == 'error':
            raise Exception(item.get('message'))
        if item_type == 'bookmark':
            bookmarks.append(Bookmark(self, **item))
    return bookmarks
|
def RegexField(regex, default=NOTHING, required=True, repr=True, cmp=True, key=None):
    """Create a new str field on a model, validated against a regex.

    :param regex: regex validation string (e.g. "[^@]+@[^@]+" for email)
    :param default: any string value
    :param bool required: whether or not the object is invalid if not provided.
    :param bool repr: whether this field should appear in the object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    """
    resolved_default = _init_fields.init_default(required, default, None)
    field_validator = _init_fields.init_validator(
        required, string_types, validators.regex(regex)
    )
    return attrib(
        default=resolved_default,
        converter=converters.str_if_not_none,
        validator=field_validator,
        repr=repr,
        cmp=cmp,
        metadata=dict(key=key),
    )
|
async def _load_message_field(self, reader, msg, field):
    """Load one message field from the reader.

    The field is defined by the message field specification
    ``(name, type, *params)``; the loaded value is stored on ``msg``
    through an element reference, supporting field references.

    :param reader: source to deserialize from
    :param msg: message object receiving the value
    :param field: field specification tuple
    """
    field_name = field[0]
    field_type = field[1]
    type_params = field[2:]
    await self.load_field(reader, field_type, type_params, eref(msg, field_name))
|
def normalize_dict(dictionary, **kwargs):
    """Given a dict, normalize all of its keys using the normalizer,
    recursing into nested dict values. Non-dict inputs are returned
    unchanged."""
    if not isinstance(dictionary, dict):
        return dictionary
    return {
        normalizer(key, **kwargs): normalize_dict(value, **kwargs)
        for key, value in dictionary.items()
    }
|
def safe_chmod(path, mode):
    """Set the permissions mode on ``path``, but only if it differs from
    the current mode (avoids touching metadata unnecessarily)."""
    current_mode = stat.S_IMODE(os.stat(path).st_mode)
    if current_mode != mode:
        os.chmod(path, mode)
|
def margin(self, value):
    """Setter for **self.__margin** attribute.

    :param value: Attribute value; must be a strictly positive int, or
        None (stored as-is, skipping validation).
    :type value: int or None
    """
    if value is not None:
        # NOTE(review): `assert` statements are stripped under
        # `python -O`, so these validations vanish in optimized runs.
        assert type(value) is int, "'{0}' attribute: '{1}' type is not 'int'!".format("margin", value)
        assert value > 0, "'{0}' attribute: '{1}' need to be exactly positive!".format("margin", value)
    self.__margin = value
|
def retrieve(self, request, *args, **kwargs):
    """Return quota details.

    To set a quota limit, issue a **PUT** request against
    */api/quotas/<quota uuid>/* with limit values.

    Please note that if a quota is a cache of a backend quota (e.g.
    'storage' size of an OpenStack tenant), it will be impossible to
    modify it through the */api/quotas/<quota uuid>/* endpoint.

    Example of changing quota limit:

    .. code-block:: http

        PUT /api/quotas/6ad5f49d6d6c49648573b2b71f44a42b/ HTTP/1.1
        Content-Type: application/json
        Accept: application/json
        Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4
        Host: example.com

        "limit": 2000.0

    Example of changing quota threshold:

    .. code-block:: http

        PUT /api/quotas/6ad5f49d6d6c49648573b2b71f44a42b/ HTTP/1.1
        Content-Type: application/json
        Accept: application/json
        Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4
        Host: example.com

        "threshold": 100.0
    """
    # Delegates entirely to DRF's standard retrieve; this override exists
    # to carry the endpoint documentation above.
    return super(QuotaViewSet, self).retrieve(request, *args, **kwargs)
|
def format(self):
    '''Return format dict describing this kind (base/item/leng/code).'''
    code_note = '' if self.both else ' (code only)'
    length_note = ' (%s)' % _nameof(self.leng) if self.leng else ''
    return _kwds(base=self.base, item=self.item, leng=length_note, code=code_note, kind=self.kind)
|
def remove_datastore(datastore, service_instance=None):
    '''Removes a datastore. If multiple datastores match, an error is raised.

    datastore
        Datastore name

    service_instance
        Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
        Default is None.

    .. code-block:: bash

        salt '*' vsphere.remove_datastore ds_name
    '''
    log.trace('Removing datastore \'%s\'', datastore)
    target = _get_proxy_target(service_instance)
    matching = salt.utils.vmware.get_datastores(
        service_instance, reference=target, datastore_names=[datastore]
    )
    if not matching:
        raise VMwareObjectRetrievalError('Datastore \'{0}\' was not found'.format(datastore))
    if len(matching) > 1:
        # ambiguous name: refuse to guess which datastore to remove
        raise VMwareObjectRetrievalError('Multiple datastores \'{0}\' were found'.format(datastore))
    salt.utils.vmware.remove_datastore(service_instance, matching[0])
    return True
|
def render_settingsLink(self, ctx, data):
    """Add the URL of the settings page to the given tag.

    @see L{xmantissa.webnav.settingsLink}
    """
    settings_page = self.pageComponents.settings
    return settingsLink(self.translator, settings_page, ctx.tag)
|
def get_minions():
    '''Return a list of minions'''
    with _get_serv(ret=None, commit=True) as cur:
        sql = '''SELECT DISTINCT id
                 FROM `salt_returns`'''
        cur.execute(sql)
        # each fetched row is a 1-tuple holding the minion id
        return [row[0] for row in cur.fetchall()]
|
def histogram(data, bins=None, *args, **kwargs):
    """Facade function to create 1D histograms.

    This proceeds in three steps:

    1) Based on magical parameter bins, construct bins for the histogram
    2) Calculate frequencies for the bins
    3) Construct the histogram object itself

    *Guiding principle:* parameters understood by numpy.histogram should be
    understood also by physt.histogram as well and should result in a
    Histogram1D object with (h.numpy_bins, h.frequencies) same as the
    numpy.histogram output. Additional functionality is a bonus.

    This function is also aliased as "h1".

    Parameters
    ----------
    data : array_like, optional
        Container of all the values (tuple, list, np.ndarray, pd.Series)
    bins : int or sequence of scalars or callable or str, optional
        If iterable => the bins themselves
        If int => number of bins for default binning
        If callable => use binning method (+ args, kwargs)
        If string => use named binning method (+ args, kwargs)
    weights : array_like, optional
        (as numpy.histogram)
    keep_missed : Optional[bool]
        store statistics about how many values were lower than limits
        and how many higher than limits (default: True)
    dropna : bool
        whether to clear data from nan's before histogramming
    name : str
        name of the histogram
    axis_name : str
        name of the variable on x axis
    adaptive : bool
        whether we want the bins to be modifiable
        (useful for continuous filling of a priori unknown data)
    dtype : type
        customize underlying data type: default int64 (without weight)
        or float (with weights)

    Other numpy.histogram parameters are excluded, see the methods of the
    Histogram1D class itself.

    Returns
    -------
    physt.histogram1d.Histogram1D

    See Also
    --------
    numpy.histogram
    """
    # local imports keep the facade cheap to import and avoid cycles
    import numpy as np
    from .histogram1d import Histogram1D, calculate_frequencies
    from .binnings import calculate_bins
    # pop facade-only kwargs so binning algorithms never see them
    adaptive = kwargs.pop("adaptive", False)
    dtype = kwargs.pop("dtype", None)
    if isinstance(data, tuple) and isinstance(data[0], str):  # Works for groupby DataSeries
        return histogram(data[1], bins, *args, name=data[0], **kwargs)
    elif type(data).__name__ == "DataFrame":
        # duck-typed check so pandas need not be imported here
        raise RuntimeError("Cannot create histogram from a pandas DataFrame. Use Series.")
    # Collect arguments (not to send them to binning algorithms)
    dropna = kwargs.pop("dropna", True)
    weights = kwargs.pop("weights", None)
    keep_missed = kwargs.pop("keep_missed", True)
    name = kwargs.pop("name", None)
    axis_name = kwargs.pop("axis_name", None)
    title = kwargs.pop("title", None)
    # Convert to array
    if data is not None:
        array = np.asarray(data)
        # .flatten()
        if dropna:
            array = array[~np.isnan(array)]
    else:
        array = None
    # Get binning
    binning = calculate_bins(array, bins, *args, check_nan=not dropna and array is not None, adaptive=adaptive, **kwargs)
    # bins = binning.bins
    # Get frequencies
    if array is not None:
        (frequencies, errors2, underflow, overflow, stats) = calculate_frequencies(array, binning=binning, weights=weights, dtype=dtype)
    else:
        # empty histogram: no data, zeroed statistics
        frequencies = None
        errors2 = None
        underflow = 0
        overflow = 0
        stats = {"sum": 0.0, "sum2": 0.0}
    # Construct the object
    if not keep_missed:
        underflow = 0
        overflow = 0
    if not axis_name:
        # try to infer the axis name from the data container itself
        if hasattr(data, "name"):
            axis_name = data.name
        elif hasattr(data, "fields") and len(data.fields) == 1 and isinstance(data.fields[0], str):
            # Case of dask fields (examples)
            axis_name = data.fields[0]
    return Histogram1D(binning=binning, frequencies=frequencies, errors2=errors2, overflow=overflow, underflow=underflow, stats=stats, dtype=dtype, keep_missed=keep_missed, name=name, axis_name=axis_name, title=title)
|
def next(self, data, final=False, to_buffer=False):
    """Add more input to the HMAC SHA1.

    :param data: next chunk of input
    :param final: mark this as the final chunk
    :param to_buffer: request the result be written to the buffer
    :return: self (fluent interface)
    """
    self.flags = pyhsm.defines.YSM_HMAC_SHA1_FINAL if final else 0x0
    if to_buffer:
        self.flags |= pyhsm.defines.YSM_HMAC_SHA1_TO_BUFFER
    self.payload = _raw_pack(self.key_handle, self.flags, data)
    self.final = final
    return self
|
def function_call_prepare_action(self, text, loc, fun):
    """Code executed after recognising a function call (type and function name).

    Looks up the function in the symbol table, saves the bookkeeping of
    any function call already in progress (supporting nested calls), and
    saves the registers currently in use.

    :raises SemanticException: if ``fun.name`` is not a function.
    """
    exshared.setpos(loc, text)
    if DEBUG > 0:
        print("FUN_PREP:", fun)
        if DEBUG == 2:
            self.symtab.display()
        if DEBUG > 2:
            return
    index = self.symtab.lookup_symbol(fun.name, SharedData.KINDS.FUNCTION)
    # idiom fix: compare against None with `is`, not `==`
    if index is None:
        raise SemanticException("'%s' is not a function" % fun.name)
    # save any previous function call data (for nested function calls)
    self.function_call_stack.append(self.function_call_index)
    self.function_call_index = index
    self.function_arguments_stack.append(self.function_arguments[:])
    del self.function_arguments[:]
    self.codegen.save_used_registers()
|
def arccalibration_direct ( wv_master , ntriplets_master , ratios_master_sorted , triplets_master_sorted_list , xpos_arc , naxis1_arc , crpix1 , wv_ini_search , wv_end_search , wvmin_useful = None , wvmax_useful = None , error_xpos_arc = 1.0 , times_sigma_r = 3.0 , frac_triplets_for_sum = 0.50 , times_sigma_theil_sen = 10.0 , poly_degree_wfit = 3 , times_sigma_polfilt = 10.0 , times_sigma_cook = 10.0 , times_sigma_inclusion = 5.0 , geometry = None , debugplot = 0 ) :
"""Performs line identification for arc calibration using line triplets .
This function assumes that a previous call to the function
responsible for the computation of information related to the
triplets derived from the master table has been previously
executed .
Parameters
wv _ master : 1d numpy array , float
Array with wavelengths corresponding to the master table
( Angstroms ) .
ntriplets _ master : int
Number of triplets built from master table .
ratios _ master _ sorted : 1d numpy array , float
Array with values of the relative position of the central line
of each triplet , sorted in ascending order .
triplets _ master _ sorted _ list : list of tuples
List with tuples of three numbers , corresponding to the three
line indices in the master table . The list is sorted to be in
correspondence with ` ratios _ master _ sorted ` .
xpos _ arc : 1d numpy array , float
Location of arc lines ( pixels ) .
naxis1 _ arc : int
NAXIS1 for arc spectrum .
crpix1 : float
CRPIX1 value to be employed in the wavelength calibration .
wv _ ini _ search : float
Minimum expected wavelength in spectrum .
wv _ end _ search : float
Maximum expected wavelength in spectrum .
wvmin _ useful : float or None
If not None , this value is used to clip detected lines below it .
wvmax _ useful : float or None
If not None , this value is used to clip detected lines above it .
error _ xpos _ arc : float
Error in arc line position ( pixels ) .
times _ sigma _ r : float
Times sigma to search for valid line position ratios .
frac _ triplets _ for _ sum : float
Fraction of distances to different triplets to sum when
computing the cost function .
times _ sigma _ theil _ sen : float
Number of times the ( robust ) standard deviation around the
linear fit ( using the Theil - Sen method ) to reject points .
poly _ degree _ wfit : int
Degree for polynomial fit to wavelength calibration .
times _ sigma _ polfilt : float
Number of times the ( robust ) standard deviation around the
polynomial fit to reject points .
times _ sigma _ cook : float
Number of times the standard deviation of Cook ' s distances
to detect outliers . If zero , this method of outlier detection
is ignored .
times _ sigma _ inclusion : float
Number of times the ( robust ) standard deviation around the
polynomial fit to include a new line in the set of identified
lines .
geometry : tuple ( 4 integers ) or None
x , y , dx , dy values employed to set the window geometry .
debugplot : int
Determines whether intermediate computations and / or plots
are displayed . The valid codes are defined in
numina . array . display . pause _ debugplot .
Returns
list _ of _ wvfeatures : list ( of WavecalFeature instances )
A list of size equal to the number of identified lines , which
elements are instances of the class WavecalFeature , containing
all the relevant information concerning the line
identification ."""
|
nlines_master = wv_master . size
delta_wv = 0.20 * ( wv_master . max ( ) - wv_master . min ( ) )
if wv_ini_search is None :
wv_ini_search = wv_master . min ( ) - delta_wv
if wv_end_search is None :
wv_end_search = wv_master . max ( ) + delta_wv
nlines_arc = xpos_arc . size
if nlines_arc < 5 :
raise ValueError ( 'Insufficient arc lines=' + str ( nlines_arc ) )
# Generate triplets with consecutive arc lines . For each triplet ,
# compatible triplets from the master table are sought . Each
# compatible triplet from the master table provides an estimate for
# CRVAL1 and CDELT1 . As an additional constraint , the only valid
# solutions are those for which the initial and the final
# wavelengths for the arc are restricted to a predefined wavelength
# interval .
crval1_search = np . array ( [ ] )
cdelt1_search = np . array ( [ ] )
error_crval1_search = np . array ( [ ] )
error_cdelt1_search = np . array ( [ ] )
itriplet_search = np . array ( [ ] , dtype = int )
clabel_search = [ ]
ntriplets_arc = nlines_arc - 2
if abs ( debugplot ) >= 10 :
print ( '>>> Total number of arc lines............:' , nlines_arc )
print ( '>>> Total number of arc triplets.........:' , ntriplets_arc )
# maximum allowed value for CDELT1
cdelt1_max = ( wv_end_search - wv_ini_search ) / float ( naxis1_arc - 1 )
# Loop in all the arc line triplets . Note that only triplets built
# from consecutive arc lines are considered .
for i in range ( ntriplets_arc ) :
i1 , i2 , i3 = i , i + 1 , i + 2
dist12 = xpos_arc [ i2 ] - xpos_arc [ i1 ]
dist13 = xpos_arc [ i3 ] - xpos_arc [ i1 ]
ratio_arc = dist12 / dist13
pol_r = ratio_arc * ( ratio_arc - 1 ) + 1
error_ratio_arc = np . sqrt ( 2 ) * error_xpos_arc / dist13 * np . sqrt ( pol_r )
ratio_arc_min = max ( 0.0 , ratio_arc - times_sigma_r * error_ratio_arc )
ratio_arc_max = min ( 1.0 , ratio_arc + times_sigma_r * error_ratio_arc )
# determine compatible triplets from the master list
j_loc_min = np . searchsorted ( ratios_master_sorted , ratio_arc_min ) - 1
j_loc_max = np . searchsorted ( ratios_master_sorted , ratio_arc_max ) + 1
if j_loc_min < 0 :
j_loc_min = 0
if j_loc_max > ntriplets_master :
j_loc_max = ntriplets_master
if abs ( debugplot ) >= 10 :
print ( i , ratio_arc_min , ratio_arc , ratio_arc_max , j_loc_min , j_loc_max )
# each triplet from the master list provides a potential
# solution for CRVAL1 and CDELT1
for j_loc in range ( j_loc_min , j_loc_max ) :
j1 , j2 , j3 = triplets_master_sorted_list [ j_loc ]
# initial solutions for CDELT1 , CRVAL1 and CRMAX1
cdelt1_temp = ( wv_master [ j3 ] - wv_master [ j1 ] ) / dist13
crval1_temp = wv_master [ j2 ] - ( xpos_arc [ i2 ] - crpix1 ) * cdelt1_temp
crmin1_temp = crval1_temp + float ( 1 - crpix1 ) * cdelt1_temp
crmax1_temp = crval1_temp + float ( naxis1_arc - crpix1 ) * cdelt1_temp
# check that CRMIN1 and CRMAX1 are within the valid limits
if wv_ini_search <= crmin1_temp <= wv_end_search and cdelt1_temp <= cdelt1_max : # Compute errors
error_crval1_temp = cdelt1_temp * error_xpos_arc * np . sqrt ( 1 + 2 * ( ( xpos_arc [ i2 ] - crpix1 ) ** 2 ) / ( dist13 ** 2 ) )
error_cdelt1_temp = np . sqrt ( 2 ) * cdelt1_temp * error_xpos_arc / dist13
# Store values and errors
crval1_search = np . append ( crval1_search , [ crval1_temp ] )
cdelt1_search = np . append ( cdelt1_search , [ cdelt1_temp ] )
error_crval1_search = np . append ( error_crval1_search , [ error_crval1_temp ] )
error_cdelt1_search = np . append ( error_cdelt1_search , [ error_cdelt1_temp ] )
# Store additional information about the triplets
itriplet_search = np . append ( itriplet_search , [ i ] )
clabel_search . append ( ( j1 , j2 , j3 ) )
# normalize the values of CDELT1 and CRVAL1 to the interval [ 0,1]
# in each case
cdelt1_search_norm = cdelt1_search / cdelt1_max
error_cdelt1_search_norm = error_cdelt1_search / cdelt1_max
crval1_search_norm = ( crval1_search - wv_ini_search )
crval1_search_norm /= ( wv_end_search - wv_ini_search )
error_crval1_search_norm = error_crval1_search
error_crval1_search_norm /= ( wv_end_search - wv_ini_search )
# intermediate plots
if abs ( debugplot ) in [ 21 , 22 ] :
from numina . array . display . matplotlib_qt import plt
# CDELT1 vs CRVAL1 diagram ( original coordinates )
fig = plt . figure ( )
ax = fig . add_subplot ( 111 )
ax . set_xlabel ( 'cdelt1 (Angstroms/pixel)' )
ax . set_ylabel ( 'crval1 (Angstroms)' )
ax . scatter ( cdelt1_search , crval1_search , s = 200 , alpha = 0.1 )
xmin = 0.0
xmax = cdelt1_max
dx = xmax - xmin
xmin -= dx / 20
xmax += dx / 20
ax . set_xlim ( xmin , xmax )
ymin = wv_ini_search
ymax = wv_end_search
dy = ymax - ymin
ymin -= dy / 20
ymax += dy / 20
ax . set_ylim ( ymin , ymax )
xp_limits = np . array ( [ 0. , cdelt1_max ] )
yp_limits = wv_end_search - float ( naxis1_arc - 1 ) * xp_limits
xp_limits = np . concatenate ( ( xp_limits , [ xp_limits [ 0 ] , xp_limits [ 0 ] ] ) )
yp_limits = np . concatenate ( ( yp_limits , [ yp_limits [ 1 ] , yp_limits [ 0 ] ] ) )
ax . plot ( xp_limits , yp_limits , linestyle = '-' , color = 'magenta' )
ax . set_title ( "Potential solutions within the valid parameter space" )
# set window geometry
set_window_geometry ( geometry )
print ( 'Number of points in last plot:' , len ( cdelt1_search ) )
pause_debugplot ( debugplot , pltshow = True , tight_layout = True )
# CDELT1 vs CRVAL1 diagram ( normalized coordinates )
fig = plt . figure ( )
ax = fig . add_subplot ( 111 )
ax . set_xlabel ( 'normalized cdelt1' )
ax . set_ylabel ( 'normalized crval1' )
ax . scatter ( cdelt1_search_norm , crval1_search_norm , s = 200 , alpha = 0.1 )
xmin = - 0.05
xmax = 1.05
ymin = - 0.05
ymax = 1.05
xp_limits = np . array ( [ 0. , 1. , 0. , 0. ] )
yp_limits = np . array ( [ 1. , 0. , 0. , 1. ] )
ax . set_xlim ( xmin , xmax )
ax . set_ylim ( ymin , ymax )
ax . plot ( xp_limits , yp_limits , linestyle = '-' , color = 'magenta' )
ax . set_title ( "Potential solutions within the valid parameter space" )
# set window geometry
set_window_geometry ( geometry )
print ( 'Number of points in last plot:' , len ( cdelt1_search_norm ) )
pause_debugplot ( debugplot , pltshow = True , tight_layout = True )
# CDELT1 vs CRVAL1 diagram ( normalized coordinates )
# with different color for each arc triplet and overplotting
# the arc triplet number
fig = plt . figure ( )
ax = fig . add_subplot ( 111 )
ax . set_xlabel ( 'normalized cdelt1' )
ax . set_ylabel ( 'normalized crval1' )
ax . scatter ( cdelt1_search_norm , crval1_search_norm , s = 200 , alpha = 0.1 , c = itriplet_search )
for i in range ( len ( itriplet_search ) ) :
ax . text ( cdelt1_search_norm [ i ] , crval1_search_norm [ i ] , str ( int ( itriplet_search [ i ] ) ) , fontsize = 6 )
ax . set_xlim ( xmin , xmax )
ax . set_ylim ( ymin , ymax )
ax . plot ( xp_limits , yp_limits , linestyle = '-' , color = 'magenta' )
ax . set_title ( "Potential solutions: arc line triplet number" )
# set window geometry
set_window_geometry ( geometry )
print ( 'Number of points in last plot:' , len ( cdelt1_search_norm ) )
pause_debugplot ( debugplot , pltshow = True , tight_layout = True )
# CDELT1 vs CRVAL1 diagram ( normalized coordinates )
# including triplet numbers
fig = plt . figure ( )
ax = fig . add_subplot ( 111 )
ax . set_xlabel ( 'normalized cdelt1' )
ax . set_ylabel ( 'normalized crval1' )
ax . scatter ( cdelt1_search_norm , crval1_search_norm , s = 200 , alpha = 0.1 , c = itriplet_search )
for i in range ( len ( clabel_search ) ) :
ax . text ( cdelt1_search_norm [ i ] , crval1_search_norm [ i ] , clabel_search [ i ] , fontsize = 6 )
ax . set_xlim ( xmin , xmax )
ax . set_ylim ( ymin , ymax )
ax . plot ( xp_limits , yp_limits , linestyle = '-' , color = 'magenta' )
ax . set_title ( "Potential solutions: master line triplets" )
# set window geometry
set_window_geometry ( geometry )
print ( 'Number of points in last plot:' , len ( cdelt1_search_norm ) )
pause_debugplot ( debugplot , pltshow = True , tight_layout = True )
# CDELT1 vs CRVAL1 diagram ( normalized coordinates )
# with error bars ( note that errors in this plot are highly
# correlated )
fig = plt . figure ( )
ax = fig . add_subplot ( 111 )
ax . set_xlabel ( 'normalized cdelt1' )
ax . set_ylabel ( 'normalized crval1' )
ax . errorbar ( cdelt1_search_norm , crval1_search_norm , xerr = error_cdelt1_search_norm , yerr = error_crval1_search_norm , fmt = 'none' )
ax . set_xlim ( xmin , xmax )
ax . set_ylim ( ymin , ymax )
ax . plot ( xp_limits , yp_limits , linestyle = '-' , color = 'magenta' )
ax . set_title ( "Potential solutions within the valid parameter space" )
# set window geometry
set_window_geometry ( geometry )
print ( 'Number of points in last plot:' , len ( cdelt1_search_norm ) )
pause_debugplot ( debugplot , pltshow = True , tight_layout = True )
# Segregate the different solutions ( normalized to [ 0,1 ] ) by
# triplet . In this way the solutions are saved in different layers
# ( a layer for each triplet ) . The solutions will be stored as python
# lists of numpy arrays .
ntriplets_layered_list = [ ]
cdelt1_layered_list = [ ]
error_cdelt1_layered_list = [ ]
crval1_layered_list = [ ]
error_crval1_layered_list = [ ]
itriplet_layered_list = [ ]
clabel_layered_list = [ ]
for i in range ( ntriplets_arc ) :
ldum = ( itriplet_search == i )
ntriplets_layered_list . append ( ldum . sum ( ) )
cdelt1_dum = cdelt1_search_norm [ ldum ]
cdelt1_layered_list . append ( cdelt1_dum )
error_cdelt1_dum = error_cdelt1_search_norm [ ldum ]
error_cdelt1_layered_list . append ( error_cdelt1_dum )
crval1_dum = crval1_search_norm [ ldum ]
crval1_layered_list . append ( crval1_dum )
error_crval1_dum = error_crval1_search_norm [ ldum ]
error_crval1_layered_list . append ( error_crval1_dum )
itriplet_dum = itriplet_search [ ldum ]
itriplet_layered_list . append ( itriplet_dum )
clabel_dum = [ k for ( k , v ) in zip ( clabel_search , ldum ) if v ]
clabel_layered_list . append ( clabel_dum )
if abs ( debugplot ) >= 10 :
print ( '>>> Total number of potential solutions: ' + str ( sum ( ntriplets_layered_list ) ) + " (double check ==) " + str ( len ( itriplet_search ) ) )
print ( '>>> List with no. of solutions/triplet.:\n' + str ( ntriplets_layered_list ) )
pause_debugplot ( debugplot )
# Computation of the cost function .
# For each solution , corresponding to a particular triplet , find
# the nearest solution in each of the remaining ntriplets _ arc - 1
# layers . Compute the distance ( in normalized coordinates ) to those
# closest solutions , and obtain the sum of distances considering
# only a fraction of them ( after sorting them in ascending order ) .
ntriplets_for_sum = max ( 1 , int ( round ( frac_triplets_for_sum * float ( ntriplets_arc ) ) ) )
funcost_search = np . zeros ( len ( itriplet_search ) )
for k in range ( len ( itriplet_search ) ) :
itriplet_local = itriplet_search [ k ]
x0 = cdelt1_search_norm [ k ]
y0 = crval1_search_norm [ k ]
dist_to_layers = np . array ( [ ] )
for i in range ( ntriplets_arc ) :
if i != itriplet_local :
if ntriplets_layered_list [ i ] > 0 :
x1 = cdelt1_layered_list [ i ]
y1 = crval1_layered_list [ i ]
dist2 = ( x0 - x1 ) ** 2 + ( y0 - y1 ) ** 2
dist_to_layers = np . append ( dist_to_layers , [ min ( dist2 ) ] )
else :
dist_to_layers = np . append ( dist_to_layers , [ np . inf ] )
dist_to_layers . sort ( )
# in - place sort
funcost_search [ k ] = dist_to_layers [ range ( ntriplets_for_sum ) ] . sum ( )
# normalize the cost function
funcost_min = min ( funcost_search )
if abs ( debugplot ) >= 10 :
print ( 'funcost_min:' , funcost_min )
funcost_search /= funcost_min
# segregate the cost function by arc triplet .
funcost_layered_list = [ ]
for i in range ( ntriplets_arc ) :
ldum = ( itriplet_search == i )
funcost_dum = funcost_search [ ldum ]
funcost_layered_list . append ( funcost_dum )
if abs ( debugplot ) >= 10 :
for i in range ( ntriplets_arc ) :
if ntriplets_layered_list [ i ] > 0 :
jdum = funcost_layered_list [ i ] . argmin ( )
print ( '>>>' , i , funcost_layered_list [ i ] [ jdum ] , clabel_layered_list [ i ] [ jdum ] , cdelt1_layered_list [ i ] [ jdum ] , crval1_layered_list [ i ] [ jdum ] )
else :
print ( '>>>' , i , None , "(None, None, None)" , None , None )
pause_debugplot ( debugplot )
# intermediate plots
if abs ( debugplot ) in [ 21 , 22 ] :
from numina . array . display . matplotlib_qt import plt
# CDELT1 vs CRVAL1 diagram ( normalized coordinates ) with symbol
# size proportional to the inverse of the cost function
fig = plt . figure ( )
ax = fig . add_subplot ( 111 )
ax . set_xlabel ( 'normalized cdelt1' )
ax . set_ylabel ( 'normalized crval1' )
ax . scatter ( cdelt1_search_norm , crval1_search_norm , s = 2000 / funcost_search , c = itriplet_search , alpha = 0.2 )
xmin = - 0.05
xmax = 1.05
ymin = - 0.05
ymax = 1.05
xp_limits = np . array ( [ 0. , 1. , 0. , 0. ] )
yp_limits = np . array ( [ 1. , 0. , 0. , 1. ] )
ax . set_xlim ( xmin , xmax )
ax . set_ylim ( ymin , ymax )
ax . plot ( xp_limits , yp_limits , linestyle = '-' , color = 'red' )
ax . set_title ( "Potential solutions within the valid parameter space\n" + "[symbol size proportional to 1/(cost function)]" )
# set window geometry
set_window_geometry ( geometry )
print ( 'Number of points in last plot:' , len ( cdelt1_search_norm ) )
pause_debugplot ( debugplot , pltshow = True , tight_layout = True )
# CDELT1 vs CRVAL1 diagram ( normalized coordinates )
# with symbol size proportional to the inverse of the cost
# function and over - plotting triplet number
fig = plt . figure ( )
ax = fig . add_subplot ( 111 )
ax . set_xlabel ( 'normalized cdelt1' )
ax . set_ylabel ( 'normalized crval1' )
ax . scatter ( cdelt1_search_norm , crval1_search_norm , s = 2000 / funcost_search , c = itriplet_search , alpha = 0.2 )
for i in range ( len ( itriplet_search ) ) :
ax . text ( cdelt1_search_norm [ i ] , crval1_search_norm [ i ] , str ( int ( itriplet_search [ i ] ) ) , fontsize = 6 )
ax . set_xlim ( xmin , xmax )
ax . set_ylim ( ymin , ymax )
ax . plot ( xp_limits , yp_limits , linestyle = '-' , color = 'red' )
ax . set_title ( "Potential solutions: arc line triplet number\n" + "[symbol size proportional to 1/(cost function)]" )
# set window geometry
set_window_geometry ( geometry )
print ( 'Number of points in last plot:' , len ( cdelt1_search ) )
pause_debugplot ( debugplot , pltshow = True , tight_layout = True )
# CDELT1 vs CRVAL1 diagram ( normalized coordinates )
# for i in range ( ntriplets _ arc ) :
# fig = plt . figure ( )
# ax = fig . add _ subplot ( 111)
# ax . set _ xlabel ( ' normalized cdelt1 ' )
# ax . set _ ylabel ( ' normalized crval1 ' )
# xdum = cdelt1 _ layered _ list [ i ]
# ydum = crval1 _ layered _ list [ i ]
# sdum = 2000 / funcost _ layered _ list [ i ]
# ax . scatter ( xdum , ydum , s = sdum , alpha = 0.8)
# ax . set _ xlim ( xmin , xmax )
# ax . set _ ylim ( ymin , ymax )
# ax . plot ( xp _ limits , yp _ limits , linestyle = ' - ' , color = ' red ' )
# ax . set _ title ( " Potential solutions : arc line triplet " + str ( i ) +
# " ( from 0 to " + str ( ntriplets _ arc - 1 ) + " ) \ n " +
# " [ symbol size proportional to 1 / ( cost function ) ] " )
# # set window geometry
# set _ window _ geometry ( geometry )
# print ( ' Number of points in last plot : ' , xdum . size )
# pause _ debugplot ( debugplot , pltshow = True , tight _ layout = True )
# Line identification : several scenarios are considered .
# * Lines with three identifications :
# - Category A : the three identifications are identical . Keep the
# lowest value of the three cost functions .
# - Category B : two identifications are identical and one is
# different . Keep the line with two identifications and the
# lowest of the corresponding two cost functions .
# - Category C : the three identifications are different . Keep the
# one which is closest to a previously identified category B
# line . Use the corresponding cost function .
# * Lines with two identifications ( second and penultimate lines ) .
# - Category D : the two identifications are identical . Keep the
# lowest cost function value .
# * Lines with only one identification ( first and last lines ) .
# - Category E : the two lines next ( or previous ) to the considered
# line have been identified . Keep its cost function .
# We store the identifications of each line in a python list of
# lists named diagonal _ ids ( which grows as the different triplets
# are considered ) . A similar list of lists is also employed to
# store the corresponding cost functions .
# It is important to set the identification of the lines to None
# when no valid master triplet has been associated to a given
# arc line triplet .
for i in range ( ntriplets_arc ) :
if ntriplets_layered_list [ i ] > 0 :
jdum = funcost_layered_list [ i ] . argmin ( )
k1 , k2 , k3 = clabel_layered_list [ i ] [ jdum ]
funcost_dum = funcost_layered_list [ i ] [ jdum ]
else :
k1 , k2 , k3 = None , None , None
funcost_dum = np . inf
if i == 0 :
diagonal_ids = [ [ k1 ] , [ k2 ] , [ k3 ] ]
diagonal_funcost = [ [ funcost_dum ] , [ funcost_dum ] , [ funcost_dum ] ]
else :
diagonal_ids [ i ] . append ( k1 )
diagonal_ids [ i + 1 ] . append ( k2 )
diagonal_ids . append ( [ k3 ] )
diagonal_funcost [ i ] . append ( funcost_dum )
diagonal_funcost [ i + 1 ] . append ( funcost_dum )
diagonal_funcost . append ( [ funcost_dum ] )
if abs ( debugplot ) >= 10 :
for i in range ( nlines_arc ) :
print ( i , diagonal_ids [ i ] , diagonal_funcost [ i ] )
pause_debugplot ( debugplot )
# The solutions are stored in a list of WavecalFeature instances .
# Each WavecalFeature contains the following elements :
# - line _ ok : bool , indicates whether the line has been properly
# identified
# - category : ' A ' , ' B ' , ' C ' , ' D ' , ' E ' , . . . , ' X ' . Note that ' X ' indicates
# that the line is still undefined .
# - id : index of the line in the master table
# - funcost : cost function associated the the line identification
# initialize list _ of _ wvfeatures
list_of_wvfeatures = [ ]
for i in range ( nlines_arc ) :
tmp_feature = WavecalFeature ( line_ok = False , category = 'X' , lineid = - 1 , funcost = np . inf , xpos = xpos_arc [ i ] , ypos = 0.0 , peak = 0.0 , fwhm = 0.0 , reference = 0.0 )
list_of_wvfeatures . append ( tmp_feature )
# set clipping window ( in Angstrom )
# note that potential lines with wavelengths outside the interval
# [ wvmin _ clip , wvmax _ clip ] will be ignored
if wvmin_useful is None :
wvmin_clip = 0.0
else :
wvmin_clip = wvmin_useful
if wvmax_useful is None :
wvmax_clip = 1.0E10
else :
wvmax_clip = wvmax_useful
# Category A lines
for i in range ( 2 , nlines_arc - 2 ) :
j1 , j2 , j3 = diagonal_ids [ i ]
if j1 == j2 == j3 and j1 is not None :
if wvmin_clip <= wv_master [ j1 ] <= wvmax_clip :
list_of_wvfeatures [ i ] . line_ok = True
list_of_wvfeatures [ i ] . category = 'A'
list_of_wvfeatures [ i ] . lineid = j1
list_of_wvfeatures [ i ] . funcost = min ( diagonal_funcost [ i ] )
list_of_wvfeatures [ i ] . reference = wv_master [ j1 ]
if abs ( debugplot ) >= 10 :
print ( '\n* Including category A lines:' )
for i in range ( nlines_arc ) :
print ( i , list_of_wvfeatures [ i ] )
pause_debugplot ( debugplot )
# Category B lines
for i in range ( 2 , nlines_arc - 2 ) :
if not list_of_wvfeatures [ i ] . line_ok :
j1 , j2 , j3 = diagonal_ids [ i ]
f1 , f2 , f3 = diagonal_funcost [ i ]
if j1 == j2 and j1 is not None :
if max ( f1 , f2 ) < f3 :
if wvmin_clip <= wv_master [ j1 ] <= wvmax_clip :
list_of_wvfeatures [ i ] . line_ok = True
list_of_wvfeatures [ i ] . category = 'B'
list_of_wvfeatures [ i ] . lineid = j1
list_of_wvfeatures [ i ] . funcost = min ( f1 , f2 )
list_of_wvfeatures [ i ] . reference = wv_master [ j1 ]
elif j1 == j3 and j1 is not None :
if max ( f1 , f3 ) < f2 :
if wvmin_clip <= wv_master [ j1 ] <= wvmax_clip :
list_of_wvfeatures [ i ] . line_ok = True
list_of_wvfeatures [ i ] . category = 'B'
list_of_wvfeatures [ i ] . lineid = j1
list_of_wvfeatures [ i ] . funcost = min ( f1 , f3 )
list_of_wvfeatures [ i ] . reference = wv_master [ j1 ]
elif j2 == j3 and j2 is not None :
if max ( f2 , f3 ) < f1 :
if wvmin_clip <= wv_master [ j2 ] <= wvmax_clip :
list_of_wvfeatures [ i ] . line_ok = True
list_of_wvfeatures [ i ] . category = 'B'
list_of_wvfeatures [ i ] . lineid = j2
list_of_wvfeatures [ i ] . funcost = min ( f2 , f3 )
list_of_wvfeatures [ i ] . reference = wv_master [ j2 ]
if abs ( debugplot ) >= 10 :
print ( '\n* Including category B lines:' )
for i in range ( nlines_arc ) :
print ( i , list_of_wvfeatures [ i ] )
pause_debugplot ( debugplot )
# Category C lines
for i in range ( 2 , nlines_arc - 2 ) :
if not list_of_wvfeatures [ i ] . line_ok :
j1 , j2 , j3 = diagonal_ids [ i ]
f1 , f2 , f3 = diagonal_funcost [ i ]
if list_of_wvfeatures [ i - 1 ] . category == 'B' :
if min ( f2 , f3 ) > f1 :
if wvmin_clip <= wv_master [ j1 ] <= wvmax_clip :
list_of_wvfeatures [ i ] . line_ok = True
list_of_wvfeatures [ i ] . category = 'C'
list_of_wvfeatures [ i ] . lineid = j1
list_of_wvfeatures [ i ] . funcost = f1
list_of_wvfeatures [ i ] . reference = wv_master [ j1 ]
elif list_of_wvfeatures [ i + 1 ] . category == 'B' :
if min ( f1 , f2 ) > f3 :
if wvmin_clip <= wv_master [ j3 ] <= wvmax_clip :
list_of_wvfeatures [ i ] . line_ok = True
list_of_wvfeatures [ i ] . category = 'C'
list_of_wvfeatures [ i ] . lineid = j3
list_of_wvfeatures [ i ] . funcost = f3
list_of_wvfeatures [ i ] . reference = wv_master [ j3 ]
if abs ( debugplot ) >= 10 :
print ( '\n* Including category C lines:' )
for i in range ( nlines_arc ) :
print ( i , list_of_wvfeatures [ i ] )
pause_debugplot ( debugplot )
# Category D lines
for i in [ 1 , nlines_arc - 2 ] :
j1 , j2 = diagonal_ids [ i ]
if j1 == j2 and j1 is not None :
if wvmin_clip <= wv_master [ j1 ] <= wvmax_clip :
f1 , f2 = diagonal_funcost [ i ]
list_of_wvfeatures [ i ] . line_ok = True
list_of_wvfeatures [ i ] . category = 'D'
list_of_wvfeatures [ i ] . lineid = j1
list_of_wvfeatures [ i ] . funcost = min ( f1 , f2 )
list_of_wvfeatures [ i ] . reference = wv_master [ j1 ]
if abs ( debugplot ) >= 10 :
print ( '\n* Including category D lines:' )
for i in range ( nlines_arc ) :
print ( i , list_of_wvfeatures [ i ] )
pause_debugplot ( debugplot )
# Category E lines
i = 0
if list_of_wvfeatures [ i + 1 ] . line_ok and list_of_wvfeatures [ i + 2 ] . line_ok :
j1 = diagonal_ids [ i ] [ 0 ]
if j1 is not None :
if wvmin_clip <= wv_master [ j1 ] <= wvmax_clip :
list_of_wvfeatures [ i ] . line_ok = True
list_of_wvfeatures [ i ] . category = 'E'
list_of_wvfeatures [ i ] . lineid = diagonal_ids [ i ] [ 0 ]
list_of_wvfeatures [ i ] . funcost = diagonal_funcost [ i ] [ 0 ]
list_of_wvfeatures [ i ] . reference = wv_master [ j1 ]
i = nlines_arc - 1
if list_of_wvfeatures [ i - 2 ] . line_ok and list_of_wvfeatures [ i - 1 ] . line_ok :
j1 = diagonal_ids [ i ] [ 0 ]
if j1 is not None :
if wvmin_clip <= wv_master [ j1 ] <= wvmax_clip :
list_of_wvfeatures [ i ] . line_ok = True
list_of_wvfeatures [ i ] . category = 'E'
list_of_wvfeatures [ i ] . lineid = diagonal_ids [ i ] [ 0 ]
list_of_wvfeatures [ i ] . funcost = diagonal_funcost [ i ] [ 0 ]
list_of_wvfeatures [ i ] . reference = wv_master [ j1 ]
if abs ( debugplot ) >= 10 :
print ( '\n* Including category E lines:' )
for i in range ( nlines_arc ) :
print ( i , list_of_wvfeatures [ i ] )
pause_debugplot ( debugplot )
fit_list_of_wvfeatures ( list_of_wvfeatures , naxis1_arc , crpix1 , poly_degree_wfit , weighted = False , geometry = geometry , debugplot = debugplot )
# Check that the solutions do not contain duplicated values . If
# they are present ( probably due to the influence of an unknown
# line that unfortunately falls too close to a real line in the
# master table ) , we keep the solution with the lowest cost
# function . The removed lines are labelled as category = ' R ' . The
# procedure is repeated several times in case a line appears more
# than twice .
lduplicated = True
nduplicated = 0
while lduplicated :
lduplicated = False
for i1 in range ( nlines_arc ) :
if list_of_wvfeatures [ i1 ] . line_ok :
j1 = list_of_wvfeatures [ i1 ] . lineid
for i2 in range ( i1 + 1 , nlines_arc ) :
if list_of_wvfeatures [ i2 ] . line_ok :
j2 = list_of_wvfeatures [ i2 ] . lineid
if j1 == j2 :
lduplicated = True
nduplicated += 1
f1 = list_of_wvfeatures [ i1 ] . funcost
f2 = list_of_wvfeatures [ i2 ] . funcost
if f1 < f2 :
list_of_wvfeatures [ i2 ] . line_ok = False
list_of_wvfeatures [ i2 ] . category = 'R'
# do not uncomment the next line :
# list _ of _ wvfeatures [ i2 ] . reference = None
else :
list_of_wvfeatures [ i1 ] . line_ok = False
list_of_wvfeatures [ i1 ] . category = 'R'
# do not uncomment the next line :
# list _ of _ wvfeatures [ i1 ] . reference = None
if abs ( debugplot ) >= 10 :
if nduplicated > 0 :
print ( '\n* Removing category R lines:' )
for i in range ( nlines_arc ) :
print ( i , list_of_wvfeatures [ i ] )
fit_list_of_wvfeatures ( list_of_wvfeatures , naxis1_arc , crpix1 , poly_degree_wfit , weighted = False , geometry = geometry , debugplot = debugplot )
else :
print ( '\n* No duplicated category R lines have been found' )
# Filter out points with a large deviation from a robust linear
# fit . The filtered lines are labelled as category = ' T ' .
if abs ( debugplot ) >= 10 :
print ( '\n>>> Theil-Sen filtering...' )
nfit , ifit , xfit , yfit , wfit = select_data_for_fit ( list_of_wvfeatures )
if nfit < 5 :
nremoved = 0
if abs ( debugplot ) >= 10 :
print ( "nfit=" , nfit )
print ( "=> Skipping Theil-Sen filtering!" )
else :
intercept , slope = fit_theil_sen ( xfit , yfit )
if abs ( debugplot ) >= 10 :
cdelt1_approx = slope
crval1_approx = intercept + slope * crpix1
print ( '>>> Theil-Sen CRVAL1: ' , crval1_approx )
print ( '>>> Theil-Sen CDELT1: ' , cdelt1_approx )
rfit = yfit - ( intercept + slope * xfit )
if abs ( debugplot ) >= 10 :
print ( 'rfit:\n' , rfit )
sigma_rfit = robust_std ( rfit )
if abs ( debugplot ) >= 10 :
print ( 'robust std:' , sigma_rfit )
print ( 'normal std:' , np . std ( rfit ) )
nremoved = 0
for i in range ( nfit ) :
if abs ( rfit [ i ] ) > times_sigma_theil_sen * sigma_rfit :
list_of_wvfeatures [ ifit [ i ] ] . line_ok = False
list_of_wvfeatures [ ifit [ i ] ] . category = 'T'
# do not uncomment the next line :
# list _ of _ wvfeatures [ ifit [ i ] ] . reference = None
nremoved += 1
if abs ( debugplot ) >= 10 :
if nremoved > 0 :
print ( '\n* Removing category T lines:' )
for i in range ( nlines_arc ) :
print ( i , list_of_wvfeatures [ i ] )
fit_list_of_wvfeatures ( list_of_wvfeatures , naxis1_arc , crpix1 , poly_degree_wfit , weighted = False , geometry = geometry , debugplot = debugplot )
else :
print ( '\nNo category T lines have been found and removed' )
# Filter out points that deviates from a polynomial fit . The
# filtered lines are labelled as category = ' P ' .
if times_sigma_polfilt > 0 :
if abs ( debugplot ) >= 10 :
print ( '\n>>> Polynomial filtering...' )
nfit , ifit , xfit , yfit , wfit = select_data_for_fit ( list_of_wvfeatures )
if nfit <= poly_degree_wfit :
print ( "nfit=" , nfit )
raise ValueError ( "Insufficient number of points for fit." )
# Note : do not use weighted fit because the weights can be very
# different and the fit is , in practice , forced to pass through
# some points while ignoring other points . Sometimes this leads to
# the rejection of valid points ( especially at the borders ) .
poly = Polynomial . fit ( x = xfit , y = yfit , deg = poly_degree_wfit )
poly = Polynomial . cast ( poly )
rfit = yfit - poly ( xfit )
if abs ( debugplot ) >= 10 :
print ( 'rfit:' , rfit )
sigma_rfit = robust_std ( rfit )
if abs ( debugplot ) >= 10 :
print ( 'robust std:' , sigma_rfit )
print ( 'normal std:' , np . std ( rfit ) )
nremoved = 0
for i in range ( nfit ) :
if abs ( rfit [ i ] ) > times_sigma_polfilt * sigma_rfit :
list_of_wvfeatures [ ifit [ i ] ] . line_ok = False
list_of_wvfeatures [ ifit [ i ] ] . category = 'P'
# do not uncomment the next line :
# list _ of _ wvfeatures [ ifit [ i ] ] . reference = None
nremoved += 1
if abs ( debugplot ) >= 10 :
if nremoved > 0 :
print ( '\n* Removing category P lines:' )
for i in range ( nlines_arc ) :
print ( i , list_of_wvfeatures [ i ] )
fit_list_of_wvfeatures ( list_of_wvfeatures , naxis1_arc , crpix1 , poly_degree_wfit , weighted = False , geometry = geometry , debugplot = debugplot )
else :
print ( '\nNo category P lines have been found and removed' )
else :
if abs ( debugplot ) >= 10 :
print ( '\n=> Skipping polynomial filtering!' )
# Remove outliers using the Cook distance . The filtered lines are
# labelled as category = ' K ' .
if times_sigma_cook > 0 :
if abs ( debugplot ) >= 10 :
print ( '\n>>> Removing outliers using Cook distance...' )
nfit , ifit , xfit , yfit , wfit = select_data_for_fit ( list_of_wvfeatures )
# There must be enough points to compute reasonable Cook distances
if nfit <= poly_degree_wfit + 3 :
nremoved = 0
if abs ( debugplot ) >= 10 :
print ( "nfit=" , nfit )
print ( "=> Skipping outliers detection using Cook distance!" )
else :
poly , yres , reject = polfit_residuals_with_cook_rejection ( x = xfit , y = yfit , deg = poly_degree_wfit , times_sigma_cook = times_sigma_cook , geometry = geometry , debugplot = debugplot )
nremoved = 0
for i in range ( nfit ) :
if abs ( reject [ i ] ) :
list_of_wvfeatures [ ifit [ i ] ] . line_ok = False
list_of_wvfeatures [ ifit [ i ] ] . category = 'K'
# do not uncomment the next line :
# list _ of _ wvfeatures [ ifit [ i ] ] . reference = None
nremoved += 1
if abs ( debugplot ) >= 10 :
if nremoved > 0 :
print ( '\n* Removing category K lines:' )
for i in range ( nlines_arc ) :
print ( i , list_of_wvfeatures [ i ] )
fit_list_of_wvfeatures ( list_of_wvfeatures , naxis1_arc , crpix1 , poly_degree_wfit , weighted = False , geometry = geometry , debugplot = debugplot )
else :
print ( '\nNo category K lines have been found and removed' )
else :
if abs ( debugplot ) >= 10 :
print ( '\n=> Skipping outlier detection using Cook distance!' )
# If all the arc lines have been identified , compute the final
# fit and exit
line_ok = np . array ( [ wvfeature . line_ok for wvfeature in list_of_wvfeatures ] )
if np . all ( line_ok ) :
return list_of_wvfeatures
# Include unidentified lines by using the prediction of the
# polynomial fit to the current set of identified lines . The
# included lines are labelled as category = ' I ' .
loop_include_new_lines = True
new_lines_included = False
while loop_include_new_lines :
if abs ( debugplot ) >= 10 :
print ( '\n>>> Polynomial prediction of unknown lines...' )
nfit , ifit , xfit , yfit , wfit = select_data_for_fit ( list_of_wvfeatures )
if nfit <= poly_degree_wfit :
raise ValueError ( "Insufficient number of points for fit." )
poly = Polynomial . fit ( x = xfit , y = yfit , deg = poly_degree_wfit )
poly = Polynomial . cast ( poly )
rfit = yfit - poly ( xfit )
if abs ( debugplot ) >= 10 :
print ( 'rfit:\n' , rfit )
sigma_rfit = robust_std ( rfit )
if abs ( debugplot ) >= 10 :
print ( 'robust std:' , sigma_rfit )
print ( 'normal std:' , np . std ( rfit ) )
intercept , slope = fit_theil_sen ( xfit , yfit )
if abs ( debugplot ) >= 10 :
print ( 'crval1, cdelt1 (linear fit):' , intercept , slope )
list_id_already_found = [ ]
list_funcost_already_found = [ ]
for i in range ( nlines_arc ) :
if list_of_wvfeatures [ i ] . line_ok :
list_id_already_found . append ( list_of_wvfeatures [ i ] . lineid )
list_funcost_already_found . append ( list_of_wvfeatures [ i ] . funcost )
nnewlines = 0
for i in range ( nlines_arc ) :
if not list_of_wvfeatures [ i ] . line_ok :
zfit = poly ( xpos_arc [ i ] )
# predicted wavelength
isort = np . searchsorted ( wv_master , zfit )
if isort == 0 :
ifound = 0
dlambda = wv_master [ ifound ] - zfit
elif isort == nlines_master :
ifound = isort - 1
dlambda = zfit - wv_master [ ifound ]
else :
dlambda1 = zfit - wv_master [ isort - 1 ]
dlambda2 = wv_master [ isort ] - zfit
if dlambda1 < dlambda2 :
ifound = isort - 1
dlambda = dlambda1
else :
ifound = isort
dlambda = dlambda2
if abs ( debugplot ) >= 10 :
print ( i , ifound , wv_master [ ifound ] , zfit , dlambda )
if ifound not in list_id_already_found : # unused line
condition1 = dlambda < times_sigma_inclusion * sigma_rfit
condition2 = dlambda / slope < error_xpos_arc
if condition1 or condition2 :
list_id_already_found . append ( ifound )
list_of_wvfeatures [ i ] . line_ok = True
list_of_wvfeatures [ i ] . category = 'I'
list_of_wvfeatures [ i ] . lineid = ifound
# assign the worse cost function value
list_of_wvfeatures [ i ] . funcost = max ( list_funcost_already_found )
list_of_wvfeatures [ i ] . reference = wv_master [ ifound ]
nnewlines += 1
if abs ( debugplot ) >= 10 :
if nnewlines > 0 :
new_lines_included = True
print ( '\n* Including category I lines:' )
for i in range ( nlines_arc ) :
print ( i , list_of_wvfeatures [ i ] )
fit_list_of_wvfeatures ( list_of_wvfeatures , naxis1_arc , crpix1 , poly_degree_wfit , weighted = False , geometry = geometry , debugplot = debugplot )
else :
if new_lines_included :
print ( "\nNo additional category I lines have been found " + "and added" )
else :
print ( '\nNo category I lines have been found and added' )
if nnewlines == 0 :
loop_include_new_lines = False
return list_of_wvfeatures
|
def reset_generation ( self , trigger ) :
"""Re - arms the analog output according to current settings
: param trigger : name of the trigger terminal . ` ` None ` ` value means generation begins immediately on run
: type trigger : str"""
|
self . tone_lock . acquire ( )
npts = self . stim . size
try :
self . aotask = AOTaskFinite ( self . aochan , self . fs , npts , trigsrc = trigger )
self . aotask . write ( self . stim )
if self . attenuator is not None :
self . attenuator . SetAtten ( self . atten )
else : # print " ERROR : attenuation not set ! "
pass
# raise
self . ngenerated += 1
if self . stim_changed :
new_gen = self . stim
else :
new_gen = None
self . stim_changed = False
except :
print u'ERROR! TERMINATE!'
self . tone_lock . release ( )
raise
self . tone_lock . release ( )
return new_gen
|
def assertFileSizeGreater ( self , filename , size , msg = None ) :
    '''Fail unless the size of ``filename`` is greater than ``size``,
    as determined by the '>' operator.

    Parameters
    filename : str, bytes, file-like
    size : int, float
    msg : str
        If not provided, the :mod:`marbles.mixins` or
        :mod:`unittest` standard message will be used.

    Raises
    TypeError
        If ``filename`` is not a str or bytes object and is not
        file-like.'''

    actual_size = self . _get_file_size ( filename )
    self . assertGreater ( actual_size , size , msg = msg )
|
def _archive_write_data ( archive , data ) :
    """Write data to archive. This will only be called with a non-empty string."""

    # hand libarchive a raw pointer to the bytes of `data`
    buf = ctypes . cast ( ctypes . c_char_p ( data ) , ctypes . c_void_p )
    written = libarchive . calls . archive_write . c_archive_write_data ( archive , buf , len ( data ) )
    if written == 0 :
        raise ValueError ( "No bytes were written. Error? [%s]" % ( c_archive_error_string ( archive ) ) )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.