signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def parse_access_token(self):
    """Read the secret and token values from the ``access_token`` file.

    When the file is missing, fall back to requesting fresh tokens via
    ``get_request_token`` / ``get_access_token``.
    """
    token_file = os.path.join(self.file_path, 'access_token')
    if not os.path.isfile(token_file):
        # No cached credentials -- run the full token acquisition flow.
        print('Missing access_token')
        self.get_request_token()
        self.get_access_token()
        return
    # Each line looks like "key=value"; collect values in file order:
    # secret first, token second.
    values = []
    with open(token_file, 'r') as handle:
        for line in handle:
            _, data = line.split('=')
            values.append(data.rstrip())
    self.access_secret = values[0]
    self.access_token = values[1]
|
def dispatch_to_awaiting(self, result):
    """Send data to the appropriate pending-request queue.

    While authenticating, ABORT/WELCOME/GOODBYE messages answer the login
    attempt itself and are routed to the welcome queue instead.

    :param result: A parsed message object.
    :raises Exception: If the message carries no ``request_id`` attribute.
    """
    # If we are awaiting login, we might also get an abort message.
    # Handle that here...
    if self._state == STATE_AUTHENTICATING:
        # If the authentication message is something unexpected,
        # we'll just ignore it for now.
        if result == WAMP_ABORT or result == WAMP_WELCOME or result == WAMP_GOODBYE:
            self._welcome_queue.put(result)
        return
    try:
        request_id = result.request_id
    except AttributeError:
        # Bug fix: the original bare `except:` swallowed every exception,
        # including errors from the queue operations below. Only a missing
        # request_id means we cannot route the message.
        raise Exception(
            "Response does not have a request id. Do not know who to send data to."
            " Data: {} ".format(result.dump()))
    if request_id in self._requests_pending:
        self._requests_pending[request_id].put(result)
        del self._requests_pending[request_id]
|
def _or_join ( self , terms ) :
"""Joins terms using OR operator .
Args :
terms ( list ) : terms to join
Examples :
self . _ or _ join ( [ ' term1 ' , ' term2 ' ] ) - > ' term1 OR term2'
Returns :
str"""
|
if isinstance ( terms , ( tuple , list ) ) :
if len ( terms ) > 1 :
return '(' + ' OR ' . join ( terms ) + ')'
else :
return terms [ 0 ]
else :
return terms
|
def make_serviceitem_servicedllsignatureexists(dll_sig_exists, condition='is', negate=False):
    """Create a node for ServiceItem/serviceDLLSignatureExists.

    :param dll_sig_exists: Boolean content for the indicator.
    :param condition: Comparison condition applied to the content.
    :param negate: Whether to negate the condition.
    :return: A IndicatorItem represented as an Element node
    """
    # All ServiceItem/serviceDLLSignatureExists indicators share the same
    # document, search term and content type; only the content varies.
    return ioc_api.make_indicatoritem_node(
        condition,
        'ServiceItem',
        'ServiceItem/serviceDLLSignatureExists',
        'bool',
        dll_sig_exists,
        negate=negate,
    )
|
def build_job(name=None, parameters=None):
    '''Initiate a build for the provided job.

    :param name: The name of the job to build (checked for existence first).
    :param parameters: Parameters to send to the job.
    :return: True if successful, otherwise raise an exception.

    CLI Example:

    .. code-block:: bash

        salt '*' jenkins.build_job jobname
    '''
    if not name:
        raise SaltInvocationError('Required parameter \'name\' is missing')
    server = _connect()
    # Validate the job before asking Jenkins to build it, so callers get a
    # clear "does not exist" error instead of a raw Jenkins failure.
    if not job_exists(name):
        raise CommandExecutionError('Job \'{0}\' does not exist.'.format(name))
    try:
        server.build_job(name, parameters)
    except jenkins.JenkinsException as err:
        # Wrap the Jenkins error in the salt-standard exception type.
        raise CommandExecutionError('Encountered error building job \'{0}\': {1}'.format(name, err))
    return True
|
def wishart_pfaffian(self):
    """ndarray of Wishart Pfaffian CDF values, before normalization.

    Evaluates ``Pfaffian(self, val).value`` element-wise over ``self._chisq``
    and returns the results reshaped to ``self._chisq``'s shape.
    """
    # np.ndenumerate walks the array in C order, so reshape restores the
    # original layout of self._chisq.
    return np.array([Pfaffian(self, val).value for i, val in np.ndenumerate(self._chisq)]).reshape(self._chisq.shape)
|
def comment_delete(self, comment_id):
    """Remove a specific comment (Requires login).

    Parameters:
        comment_id (int): The id number of the comment to remove.
    """
    endpoint = 'comments/{0}.json'.format(comment_id)
    return self._get(endpoint, method='DELETE', auth=True)
|
def ExtractEvents(self, parser_mediator, registry_key, codepage='cp1252', **kwargs):
    """Extracts events from a Windows Registry key.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
      codepage (Optional[str]): extended ASCII string codepage.
    """
    # Delegates all work to the MRUListEx key parser; extra kwargs ignored.
    self._ParseMRUListExKey(parser_mediator, registry_key, codepage=codepage)
|
def to_frame(self, data, state):
    """Extract a single newline-terminated frame from the data buffer.

    The consumed data (frame plus newline) is removed from the buffer.

    :param data: A ``bytearray`` instance containing the data so far read.
    :param state: An instance of ``FramerState``.  Unused by this framer but
                  kept for interface compatibility.
    :returns: The frame as ``bytes``, without the trailing newline (and
              without a trailing carriage return when ``self.carriage_return``
              is set).
    :raises exc.NoFrames: If no complete frame can be read.
    """
    # Find the next newline; without one, no complete frame is available.
    data_len = data.find(b'\n')
    if data_len < 0:
        raise exc.NoFrames()
    # Number of bytes to drop from the buffer (frame + newline).
    frame_len = data_len + 1
    # Optionally exclude a carriage return immediately before the newline.
    if (self.carriage_return and data_len
            and data[data_len - 1] == ord(b'\r')):
        data_len -= 1
    # bytes() replaces the six.binary_type compatibility shim; on Python 3
    # they are the same type.
    frame = bytes(data[:data_len])
    del data[:frame_len]
    return frame
|
async def rev_regs(self) -> list:
    """Return list of revocation registry identifiers for which HolderProver
    has associated tails files.

    The operation creates associations for any (newly copied, via service
    wrapper API) tails files without one.

    :return: list of revocation registry identifiers for which HolderProver
        has associated tails files.
    """
    LOGGER.debug('HolderProver.rev_regs >>>')

    # Sync each linked tails file first so newly copied files get associated.
    for path_rr_id in Tails.links(self._dir_tails):
        await self._sync_revoc_for_proof(basename(path_rr_id))

    # Re-list after syncing; each link's basename is a rev reg identifier.
    rv = [basename(f) for f in Tails.links(self._dir_tails)]
    LOGGER.debug('HolderProver.rev_regs <<< %s', rv)
    return rv
|
def is_qualimap_compatible(gtf):
    """Qualimap needs a very specific GTF format or it fails, so skip it if
    the GTF is not in that format."""
    if not gtf:
        return False

    def _has_required_attributes(feature):
        # Qualimap needs gene_id, transcript_id and gene_biotype present
        # (and non-empty) on a feature.
        attrs = feature.attributes
        gene_id = attrs.get('gene_id', [None])[0]
        transcript_id = attrs.get('transcript_id', [None])[0]
        gene_biotype = attrs.get('gene_biotype', [None])[0]
        return gene_id and transcript_id and gene_biotype

    db = get_gtf_db(gtf)
    # One compatible feature is enough.
    return any(_has_required_attributes(feature) for feature in db.all_features())
|
def sparseHealpixFiles(title, infiles, field='MAGLIM', **kwargs):
    """Plot a sparse HEALPix map read from the given input files.

    :param title: Title for the mollview plot.
    :param infiles: Input file(s) containing the partial HEALPix map.
    :param field: Column to read from the input files (default 'MAGLIM').
    :param kwargs: Extra keyword arguments forwarded to ``hp.mollview``.
    :returns: Tuple of (mollview return value, map array).
    """
    # Renamed the local from `map` to avoid shadowing the builtin.
    skymap = ugali.utils.skymap.read_partial_map(infiles, field)
    ax = hp.mollview(map=skymap, title=title, **kwargs)
    return ax, skymap
|
def name(self):
    """Name of the resource.

    If conversion to unicode somehow didn't go well, the value is returned
    in base64 encoding.
    """
    raw = self._raw_data
    # Prefer the unicode name, then the raw name, then an empty string.
    return raw.get(ATTR_NAME_UNICODE) or raw.get(ATTR_NAME) or ""
|
def make_even_size(x):
    """Pad x to be even-sized on axis 1 and 2, but only if necessary.

    Assumes x is a tensor of rank >= 3 (asserted below); axes 1 and 2 are
    rounded up to the next even size when odd.
    """
    x_shape = x.get_shape().as_list()
    assert len(x_shape) > 2, "Only 3+-dimensional tensors supported."
    # Use -1 as a stand-in for unknown (dynamic) dimensions.  NOTE(review):
    # (-1) % 2 == 1 in Python, so dynamic axes take the padding path.
    shape = [dim if dim is not None else -1 for dim in x_shape]
    # NOTE(review): new_shape aliases x_shape (no copy); the writes below
    # mutate both, which is harmless since x_shape is not read afterwards.
    new_shape = x_shape
    # To make sure constant shapes remain constant.
    if x_shape[1] is not None:
        new_shape[1] = 2 * int(math.ceil(x_shape[1] * 0.5))
    if x_shape[2] is not None:
        new_shape[2] = 2 * int(math.ceil(x_shape[2] * 0.5))
    # Both axes already even: nothing to do.
    if shape[1] % 2 == 0 and shape[2] % 2 == 0:
        return x
    # Only axis 2 is odd.
    if shape[1] % 2 == 0:
        x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2)
        x.set_shape(new_shape)
        return x
    # Only axis 1 is odd.
    if shape[2] % 2 == 0:
        x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1)
        x.set_shape(new_shape)
        return x
    # Both axes are odd: pad each in turn.
    x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1)
    x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2)
    x.set_shape(new_shape)
    return x
|
def post_loader(*decorator_args, serializer):
    """Decorator to automatically instantiate a model from json request data.

    Works both when applied bare (receiving the function directly) and when
    called with arguments; ``serializer`` is keyword-only and required.

    :param serializer: The ModelSerializer to use to load data from the request.
    """
    def wrapped(fn):
        @wraps(fn)
        def decorated(*args, **kwargs):
            # NOTE(review): incoming *args/**kwargs are discarded -- the view
            # is called only with the unpacked result of serializer.load().
            # Confirm this is intentional for routes with URL parameters.
            return fn(*serializer.load(request.get_json()))
        return decorated
    # Support bare usage: the decorated function arrives as the first
    # positional argument.
    if decorator_args and callable(decorator_args[0]):
        return wrapped(decorator_args[0])
    return wrapped
|
def _check_for_int ( x ) :
"""This is a compatibility function that takes a C { float } and converts it to an
C { int } if the values are equal ."""
|
try :
y = int ( x )
except ( OverflowError , ValueError ) :
pass
else : # There is no way in AMF0 to distinguish between integers and floats
if x == x and y == x :
return y
return x
|
def _install_nuke(use_threaded_wrapper):
    """Helper function to install The Foundry Nuke support.

    :param use_threaded_wrapper: Whether calls should run via the
        main-thread wrapper.
    :raises ImportError: When running under Hiero, Nuke Studio or NukeAssist
        instead of plain Nuke.
    """
    import nuke

    # Hiero / Studio / NukeAssist share the Nuke binary; detect them from
    # the raw command-line arguments and bail out.
    not_nuke_launch = ("--hiero" in nuke.rawArgs
                       or "--studio" in nuke.rawArgs
                       or "--nukeassist" in nuke.rawArgs)
    if not_nuke_launch:
        raise ImportError

    def threaded_wrapper(func, *args, **kwargs):
        # Nuke requires UI-affecting calls to execute on the main thread;
        # note args/kwargs are passed as two positional values, matching
        # Nuke's executeInMainThreadWithResult signature.
        return nuke.executeInMainThreadWithResult(func, args, kwargs)

    _common_setup("Nuke", threaded_wrapper, use_threaded_wrapper)
|
def after(self):
    """Return a deferred that will fire after the request is finished.

    Returns:
        Deferred: a new deferred that will fire appropriately.
    """
    d = Deferred()
    self._after_deferreds.append(d)
    # NOTE(review): returns d.chain rather than d itself -- presumably a
    # chained deferred so caller-added callbacks don't interfere with the
    # internal one; confirm against the Deferred implementation in use.
    return d.chain
|
def step(self, stash='active', n=None, selector_func=None, step_func=None, successor_func=None, until=None, filter_func=None, **run_args):
    """Step a stash of states forward and categorize the successors appropriately.

    The parameters to this function allow you to control everything about the
    stepping and categorization process.

    :param stash:          The name of the stash to step (default: 'active')
    :param selector_func:  If provided, should be a function that takes a state and returns a
                           boolean. If True, the state will be stepped. Otherwise, it will be
                           kept as-is.
    :param step_func:      If provided, should be a function that takes a SimulationManager and
                           returns a SimulationManager. Will be called with the SimulationManager
                           at every step. Note that this function should not actually perform any
                           stepping - it is meant to be a maintenance function called after each step.
    :param successor_func: If provided, should be a function that takes a state and return its successors.
                           Otherwise, project.factory.successors will be used.
    :param filter_func:    If provided, should be a function that takes a state and return the name
                           of the stash, to which the state should be moved.
    :param until:          (DEPRECATED) If provided, should be a function that takes a SimulationManager and
                           returns True or False. Stepping will terminate when it is True.
    :param n:              (DEPRECATED) The number of times to step (default: 1 if "until" is not provided)

    Additionally, you can pass in any of the following keyword args for project.factory.successors:

    :param jumpkind:       The jumpkind of the previous exit
    :param addr:           An address to execute at instead of the state's ip.
    :param stmt_whitelist: A list of stmt indexes to which to confine execution.
    :param last_stmt:      A statement index at which to stop execution.
    :param thumb:          Whether the block should be lifted in ARM's THUMB mode.
    :param backup_state:   A state to read bytes from instead of using project memory.
    :param opt_level:      The VEX optimization level to use.
    :param insn_bytes:     A string of bytes to use for the block instead of the project.
    :param size:           The maximum size of the block, in bytes.
    :param num_inst:       The maximum number of instructions.
    :param traceflags:     traceflags to be passed to VEX. Default: 0

    :returns:              The simulation manager, for chaining.
    :rtype:                SimulationManager
    """
    l.info("Stepping %s of %s", stash, self)
    # 8<----- Compatibility layer -----
    # Deprecated n/until arguments are forwarded to run() instead.
    if n is not None or until is not None:
        if once('simgr_step_n_until'):
            print("\x1b[31;1mDeprecation warning: the use of `n` and `until` arguments is deprecated. "
                  "Consider using simgr.run() with the same arguments if you want to specify "
                  "a number of steps or an additional condition on when to stop the execution.\x1b[0m")
        return self.run(stash, n, until, selector_func=selector_func, step_func=step_func,
                        successor_func=successor_func, filter_func=filter_func, **run_args)
    # ----- Compatibility layer ----->8
    # bucket maps destination stash name -> states to place there.
    bucket = defaultdict(list)
    for state in self._fetch_states(stash=stash):
        # filter_func may redirect the state to another stash; it can also
        # return a (stash_name, state) tuple to replace the state itself.
        goto = self.filter(state, filter_func=filter_func)
        if isinstance(goto, tuple):
            goto, state = goto
        if goto not in (None, stash):
            bucket[goto].append(state)
            continue
        # A False selector means: keep the state as-is, do not step it.
        if not self.selector(state, selector_func=selector_func):
            bucket[stash].append(state)
            continue
        # Track errored count so we can tell whether stepping errored.
        pre_errored = len(self._errored)
        successors = self.step_state(state, successor_func=successor_func, **run_args)
        # handle degenerate stepping cases here. desired behavior:
        # if a step produced only unsat states, always add them to the unsat stash since this usually indicates a bug
        # if a step produced sat states and save_unsat is False, drop the unsats
        # if a step produced no successors, period, add the original state to deadended
        # first check if anything happened besides unsat. that gates all this behavior
        if not any(v for k, v in successors.items() if k != 'unsat') and len(self._errored) == pre_errored:
            # then check if there were some unsats
            if successors.get('unsat', []):
                # only unsats. current setup is acceptable.
                pass
            else:
                # no unsats. we've deadended.
                bucket['deadended'].append(state)
                continue
        else:
            # there were sat states. it's okay to drop the unsat ones if the user said so.
            if not self._save_unsat:
                successors.pop('unsat', None)
        # A falsy stash key means "keep in the current stash".
        for to_stash, successor_states in successors.items():
            bucket[to_stash or stash].extend(successor_states)
    # Replace the stepped stash's contents with the categorized states.
    self._clear_states(stash=stash)
    for to_stash, states in bucket.items():
        self._store_states(to_stash or stash, states)
    if step_func is not None:
        return step_func(self)
    return self
|
def bingham_pdf(fit):
    """Build a Bingham PDF over spherical coordinates for the given fit.

    From the *Encyclopedia of Paleomagnetism*; from Onstott, 1980:
    the vector resultant R is analogous to eigenvectors of T, and
    eigenvalues are analogous to |R|/N.
    """
    # Uses eigenvectors of the covariance matrix
    e = fit.hyperbolic_axes
    # e = sampling_covariance(fit)  # not sure
    e = e[2] ** 2 / e
    kappa = (e - e[2])[:-1]
    kappa /= kappa[-1]
    # NOTE(review): F is assigned twice; this first value is discarded and
    # only the hyp1f1-based value below is used -- confirm which is intended.
    F = N.sqrt(N.pi) * confluent_hypergeometric_function(*kappa)
    ax = fit.axes
    Z = 1 / e
    M = ax
    F = 1 / hyp1f1(*1 / Z)

    def pdf(coords):
        # Bingham is given in spherical coordinates of inclination and
        # declination in radians (per the USGS bingham statistics reference).
        lon, lat = coords
        I = lat
        D = lon
        xhat = N.array(sph2cart(lon, lat)).T
        # return F * expm(dot(xhat.T, M, N.diag(Z), M.T, xhat))
        return 1 / (F * N.exp(dot(xhat.T, M, N.diag(Z), M.T, xhat)))
    return pdf
|
def wait(self, timeout=15):
    """Block until the pod reports ready; raises ProbeTimeout on timeout.

    :param timeout: int or float (seconds), time to wait for pod to run.
    :return: None
    """
    # Poll self.is_ready until it returns True or the probe times out.
    Probe(timeout=timeout, fnc=self.is_ready, expected_retval=True).run()
|
def _add_seg_to_output(out, data, enumerate_chroms=False):
    """Export outputs to 'seg' format compatible with IGV and GenePattern."""
    base, _ = os.path.splitext(out["cns"])
    out_file = "%s.seg" % base
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            # cnvkit.py lives next to the current Python interpreter.
            cnvkit = os.path.join(os.path.dirname(sys.executable), "cnvkit.py")
            cmd = [cnvkit, "export", "seg"]
            if enumerate_chroms:
                cmd.append("--enumerate-chroms")
            cmd.extend(["-o", tx_out_file, out["cns"]])
            do.run(cmd, "CNVkit export seg")
    out["seg"] = out_file
    return out
|
def save(self, path, name, save_meta=True):
    '''Saves model as a sequence of files in the format:
    {path}/{name}_{'dec', 'disc', 'dec_opt', 'disc_opt', 'meta'}.h5

    Parameters
    ----------
    path : str
        The directory of the file you wish to save the model to.
    name : str
        The name prefix of the model and optimizer files you wish to save.
    save_meta : bool, optional
        Flag that controls whether to save the class metadata along with
        the generator, discriminator, and respective optimizer states.
    '''
    path, name = str(path), str(name)
    # Save each component under its conventional suffix.
    components = (
        ("%s_dec" % name, self.dec),
        ("%s_disc" % name, self.disc),
        ("%s_dec_opt" % name, self.dec_opt),
        ("%s_disc_opt" % name, self.disc_opt),
    )
    for fname, component in components:
        _save_model(component, path, fname)
    if save_meta:
        self._save_meta(os.path.join(path, "%s_meta" % name))
|
from typing import List
def separate_parenthesis_groups(paren_string: str) -> List[str]:
    """Separate groups of balanced parentheses into separate strings.

    Spaces are removed first; any other non-parenthesis character is skipped
    (the original implementation miscounted every non-'(' character as a
    closing parenthesis, corrupting the balance).

    Args:
        paren_string (str): String containing multiple groups of parentheses.

    Returns:
        List[str]: List of separated groups of parentheses.

    Example:
        >>> separate_parenthesis_groups('( ) (( )) (( )( ))')
        ['()', '(())', '(()())']
    """
    # Remove spaces and initialise state.
    paren_string = paren_string.replace(' ', '')
    balance = 0
    groups = []
    start_index = 0
    for i, char in enumerate(paren_string):
        if char == '(':
            balance += 1
        elif char == ')':
            balance -= 1
        else:
            # Skip stray characters instead of treating them as ')'.
            continue
        # Balance returning to zero marks the end of a group.
        if balance == 0:
            groups.append(paren_string[start_index:i + 1])
            start_index = i + 1
    return groups
|
def do_access_control(self):
    """``before_request`` handler to check if user should be redirected to
    login page.

    Evaluates registered access controllers (most specific first); on denial
    anonymous users are redirected to login and others get a 403.
    """
    from abilian.services import get_service
    # Special case for tests: NO_LOGIN forces login as the user with id 0.
    if current_app.testing and current_app.config.get("NO_LOGIN"):
        user = User.query.get(0)
        login_user(user, force=True)
        return
    state = self.app_state
    user = unwrap(current_user)
    # Another special case for tests
    if current_app.testing and getattr(user, "is_admin", False):
        return
    security = get_service("security")
    user_roles = frozenset(security.get_roles(user))
    endpoint = request.endpoint
    blueprint = request.blueprint
    # Collect controllers from most generic (app-wide) to most specific
    # (endpoint); they are consulted in reverse, most specific first.
    access_controllers = []
    access_controllers.extend(state.bp_access_controllers.get(None, []))
    if blueprint and blueprint in state.bp_access_controllers:
        access_controllers.extend(state.bp_access_controllers[blueprint])
    if endpoint and endpoint in state.endpoint_access_controllers:
        access_controllers.extend(state.endpoint_access_controllers[endpoint])
    for access_controller in reversed(access_controllers):
        verdict = access_controller(user=user, roles=user_roles)
        if verdict is None:
            # This controller abstains; ask the next one.
            continue
        elif verdict is True:
            return
        else:
            # Denied: anonymous users get a login redirect, others a 403.
            if user.is_anonymous:
                return self.redirect_to_login()
            raise Forbidden()
    # default policy
    if current_app.config.get("PRIVATE_SITE") and user.is_anonymous:
        return self.redirect_to_login()
|
def removeZeroLenPadding(str, blocksize=AES_blocksize):
    'Remove Padding with zeroes + last byte equal to the number of padding bytes'
    # NOTE(review): parameter name `str` shadows the builtin; renaming would
    # break keyword callers, so it is left as-is.
    try:
        # Python 2 str: the last element is a 1-char string, so take ord().
        pad_len = ord(str[-1])
        # last byte contains number of padding bytes
    except TypeError:
        # Python 3 bytes: indexing already yields an int.
        pad_len = str[-1]
    # NOTE(review): asserts are stripped under -O; these are sanity checks,
    # not input validation.
    assert pad_len < blocksize, 'padding error'
    assert pad_len < len(str), 'padding error'
    return str[:-pad_len]
|
def read(self, size=-1):
    """Read data from the ring buffer into a new buffer.

    This advances the read index after reading; calling
    :meth:`advance_read_index` is *not* necessary.

    :param size: The number of elements to be read.  If not specified, all
        available elements are read.
    :type size: int, optional
    :returns: A new buffer containing the read data.  Its size may be less
        than the requested *size*.
    :rtype: buffer
    """
    # Negative size means "read everything currently available".
    if size < 0:
        size = self.read_available
    raw = self._ffi.new('unsigned char[]', size * self.elementsize)
    actual = self.readinto(raw)
    return self._ffi.buffer(raw, actual * self.elementsize)
|
def _apply(self, ctx: ExtensionContext) -> Any:
    """Loads a yaml fragment from an external file.

    Args:
        ctx: The processing context.

    Returns:
        The external resource as a python dictionary. The fragment is already
        sent through the processor as well.
    """
    # The node is a (tag, value) pair; the value is the external file path.
    _, external_path = ctx.node
    # Resolve the external path relative to the current document when that
    # document is a real file; otherwise resolve with no base location.
    return ctx.mentor.load_yaml(
        self.locator(external_path,
                     cast(str, ctx.document) if Validator.is_file(document=ctx.document) else None))
|
def length(value, min=None, max=None):
    """Return whether or not the length of given string is within a specified
    range.

    Examples::

        >>> length('something', min=2)
        True
        >>> length('something', min=9, max=9)
        True
        >>> length('something', max=5)
        ValidationFailure(func=length, ...)

    :param value: The string to validate.
    :param min: The minimum required length of the string. If not provided,
        minimum length will not be checked.
    :param max: The maximum length of the string. If not provided, maximum
        length will not be checked.

    .. versionadded:: 0.2
    """
    negative_min = min is not None and min < 0
    negative_max = max is not None and max < 0
    if negative_min or negative_max:
        raise AssertionError('`min` and `max` need to be greater than zero.')
    # Delegate the actual range check to the numeric `between` validator.
    return between(len(value), min=min, max=max)
|
def delete(self, file_path):
    """DELETE a file from Ndrive.

    Args:
        file_path: Full remote path of the file you want to delete,
            e.g. ``/Picture/flower.png``.

    Returns:
        True: Delete success
        False: Delete failed
    """
    # Bug fix: the original referenced undefined names `upload_path` and
    # `file_name` (copy/paste from the upload method) and an unused `now`
    # timestamp; build the URL from the `file_path` argument instead.
    url = nurls['put'] + file_path
    headers = {
        'userid': self.user_id,
        'useridx': self.useridx,
        'Content-Type': "application/x-www-form-urlencoded; charset=UTF-8",
        'charset': 'UTF-8',
        'Origin': 'http://ndrive2.naver.com',
    }
    r = self.session.delete(url=url, headers=headers)
    return self.resultManager(r.text)
|
def add_torrent_task(self, torrent_path, save_path='/', selected_idx=(), **kwargs):
    """Add a local BitTorrent download task.

    :param torrent_path: Local path of the .torrent file.
    :param save_path: Remote directory to save the download into.
    :param selected_idx: File indexes to download -- an empty collection
        downloads everything, a non-empty collection selects those indexes,
        and any non-collection value requests the server default.
    :return: requests.Response

    .. note::
        On success the Response content contains:
        {"task_id": task id, "rapid_download": whether already finished
        (rapid download), "request_id": request id}
    """
    # Upload the torrent file itself.
    torrent_handler = open(torrent_path, 'rb')
    basename = os.path.basename(torrent_path)
    # Remove any remote file with the same name first.
    self.delete(['/' + basename])
    response = self.upload('/', torrent_handler, basename).json()
    remote_path = response['path']
    logging.debug('REMOTE PATH:' + remote_path)
    # Query the torrent metadata from the uploaded file.
    response = self._get_torrent_info(remote_path).json()
    if response.get('error_code'):
        print(response.get('error_code'))
        return
    if not response['torrent_info']['file_info']:
        return
    # File indexes to download: empty collection -> all files, non-empty
    # collection -> the given indexes, any other value -> server default.
    if isinstance(selected_idx, (tuple, list, set)):
        if len(selected_idx) > 0:
            selected_idx = ','.join(map(str, selected_idx))
        else:
            selected_idx = ','.join(map(str, range(1, len(response['torrent_info']['file_info']) + 1)))
    else:
        selected_idx = ''
    # Kick off the cloud download task.
    data = {'file_sha1': response['torrent_info']['sha1'],
            'save_path': save_path,
            'selected_idx': selected_idx,
            'task_from': '1',
            'source_path': remote_path,
            'type': '2'  # 2 is torrent file
            }
    url = 'http://{0}/rest/2.0/services/cloud_dl'.format(BAIDUPAN_SERVER)
    return self._request('create', 'add_task', url=url, data=data, **kwargs)
|
def get_dict_to_print(field_to_obs):
    """Transform the field-to-obs mapping into a printable dictionary.

    Args:
        field_to_obs: Dict that maps string field to `Observation` list.

    Returns:
        A dict with the keys and values to print to console.
    """
    def compressed_steps(steps):
        # Summary statistics only -- used for the high-volume fields.
        return {'num_steps': len(set(steps)),
                'min_step': min(steps),
                'max_step': max(steps),
                'last_step': steps[-1],
                'first_step': steps[0],
                'outoforder_steps': get_out_of_order(steps)}

    def full_steps(steps):
        # Full step list -- used for the low-volume fields.
        return {'steps': steps, 'outoforder_steps': get_out_of_order(steps)}

    output = {}
    for field, observations in field_to_obs.items():
        if not observations:
            output[field] = None
            continue
        steps = [obs['step'] for obs in observations]
        if field in SHORT_FIELDS:
            output[field] = compressed_steps(steps)
        if field in LONG_FIELDS:
            output[field] = full_steps(steps)
    return output
|
def get_fields(self, strip_labels=False):
    """Hook to dynamically change the fields that will be displayed.

    :param strip_labels: When True, (name, label) pairs in ``self.fields``
        are reduced to just the field name.
    :returns: The list of fields, optionally with labels stripped.
    """
    if strip_labels:
        # isinstance is the idiomatic check (was: type(f) in (tuple, list));
        # it also accepts tuple/list subclasses such as namedtuples.
        return [f[0] if isinstance(f, (tuple, list)) else f for f in self.fields]
    return self.fields
|
def start_dut_thread(self):  # pylint: disable=no-self-use
    """Start the shared Dut worker thread if it is not already running.

    :return: Nothing
    """
    # All state is class-level: one daemon thread services every Dut.
    if Dut._th is None:
        Dut._run = True
        Dut._sem = Semaphore(0)
        Dut._signalled_duts = deque()
        Dut._logger = LogManager.get_bench_logger('Dut')
        Dut._th = Thread(target=Dut.run, name='DutThread')
        # Daemon so the thread never blocks interpreter shutdown.
        Dut._th.daemon = True
        Dut._th.start()
|
def isOutDated(self, output_file):
    """Decide whether Cyther should compile this file by comparing the
    modification times of the source and the given output file."""
    if not output_file.exists():
        # No previous build output -- must compile.
        return True
    # Recompile only when the source is newer than the output.
    return self.getmtime() > output_file.getmtime()
|
def _get_stack(self, orchestration_client, stack_name):
    """Get the ID for the current deployed overcloud stack if it exists.

    :param orchestration_client: Heat client used to look up the stack.
    :param stack_name: Name of the stack to fetch.
    :returns: The stack object, or implicitly None when no stack exists.
    """
    try:
        stack = orchestration_client.stacks.get(stack_name)
        self.log.info("Stack found, will be doing a stack update")
        return stack
    except HTTPNotFound:
        # A missing stack is expected on first deploy; fall through to
        # (implicitly) return None.
        self.log.info("No stack found, will be doing a stack create")
|
def get_similarity_measures(self):
    """Helper function for computing similarity measures.

    Prints progress unless ``self.quiet`` is set, then delegates the actual
    work to ``self.compute_similarity_scores``.
    """
    if not self.quiet:
        # Bug fix: the original used Python 2 print statements, which are a
        # SyntaxError under Python 3.
        print()
        print("Computing", self.current_similarity_measure, "similarity...")
    self.compute_similarity_scores()
|
def _load_hangul_syllable_types():
    """Parse "HangulSyllableType.txt" from the Unicode Character Database (UCD)
    and populate the module-level lookup table mapping Hangul code points to
    their syllable type ("L", "V", "T", "LV" or "LVT").

    For more info on the UCD, see: https://www.unicode.org/ucd/
    """
    filename = "HangulSyllableType.txt"
    # The data file is expected to live next to this module.
    current_dir = os.path.abspath(os.path.dirname(__file__))
    with codecs.open(os.path.join(current_dir, filename), mode="r", encoding="utf-8") as fp:
        for line in fp:
            # Skip empty lines or lines that are comments (comments start with '#')
            if not line.strip() or line.startswith("#"):
                continue
            # Data lines look like: "<codepoint(s)> ; <type> # <comment>".
            data = line.strip().split(";")
            syllable_type, _ = map(six.text_type.strip, data[1].split("#"))
            if ".." in data[0]:
                # If it is a range ("start..end") and not a single value.
                start, end = map(lambda x: int(x, 16), data[0].strip().split(".."))
                for idx in range(start, end + 1):
                    _hangul_syllable_types[idx] = syllable_type
            else:
                _hangul_syllable_types[int(data[0].strip(), 16)] = syllable_type
|
def get_modpath(modname, prefer_pkg=False, prefer_main=False):
    r"""Return the filesystem path to a module.

    Args:
        modname (str or module): module name or actual module.
        prefer_pkg (bool): if True, return the package directory instead of
            its ``__init__.py`` / ``__main__.py`` file.
        prefer_main (bool): if True, return ``__main__.py`` instead of
            ``__init__.py`` when that file exists.

    Returns:
        str: path to the module file (or package directory).

    Example:
        >>> get_modpath('json').endswith('__init__.py')
        True
    """
    import importlib
    # Accept either a module object or an importable dotted name.
    # (isinstance(..., str) replaces the former six.string_types check;
    # plain str is correct on Python 3.)
    if isinstance(modname, str):
        module = importlib.import_module(modname)
    else:
        module = modname
    # Prefer the source file over a stale compiled artifact.
    modpath = module.__file__.replace('.pyc', '.py')
    initname = '__init__.py'
    mainname = '__main__.py'
    if prefer_pkg:
        # Return the package directory rather than its entry file.
        if modpath.endswith(initname) or modpath.endswith(mainname):
            modpath = dirname(modpath)
    if prefer_main:
        if modpath.endswith(initname):
            main_modpath = modpath[:-len(initname)] + mainname
            if exists(main_modpath):
                modpath = main_modpath
    return modpath
|
def in_download_archive(track):
    """Return True if the track's id already exists in the download archive."""
    global arguments
    if not arguments['--download-archive']:
        return
    archive_filename = arguments.get('--download-archive')
    try:
        # 'a+' creates the archive on first use; seek back to read from the top.
        with open(archive_filename, 'a+', encoding='utf-8') as archive:
            logger.debug('Contents of {0}:'.format(archive_filename))
            archive.seek(0)
            track_id = '{0}'.format(track['id'])
            for entry in archive:
                logger.debug('"' + entry.strip() + '"')
                if entry.strip() == track_id:
                    return True
    except IOError as ioe:
        logger.error('Error trying to read download archive...')
        logger.debug(ioe)
    return False
|
def StreamFilePath(self, filepath, offset=0, amount=None):
    """Streams chunks of a file located at given path starting at given offset.

    Args:
        filepath: A path to the file to stream.
        offset: An integer offset at which the file stream should start on.
        amount: An upper bound on number of bytes to read.

    Yields:
        `Chunk` instances.
    """
    # Open the file and delegate the actual chunking to StreamFile.
    with open(filepath, "rb") as stream:
        for piece in self.StreamFile(stream, offset=offset, amount=amount):
            yield piece
|
def find_file(self, path, tgt_env):
    '''Find the specified file in the specified environment.

    Resolves ``path`` inside the git tree for ``tgt_env``, following
    symlinks up to ``SYMLINK_RECURSE_DEPTH`` levels deep.

    Returns a 3-tuple ``(blob, hexsha, mode)``, or ``(None, None, None)``
    when the path does not exist, points at a directory, or the symlink
    chain is too deep.
    '''
    tree = self.get_tree(tgt_env)
    if not tree:
        # Branch/tag/SHA not found in repo
        return None, None, None
    blob = None
    depth = 0
    while True:
        depth += 1
        if depth > SYMLINK_RECURSE_DEPTH:
            # Too many symlink hops; treat as not found.
            blob = None
            break
        try:
            file_blob = tree / path
            if stat.S_ISLNK(file_blob.mode):
                # Path is a symlink. The blob data corresponding to
                # this path's object ID will be the target of the
                # symlink. Follow the symlink and set path to the
                # location indicated in the blob data.
                stream = six.StringIO()
                file_blob.stream_data(stream)
                stream.seek(0)
                link_tgt = stream.read()
                stream.close()
                # Symlink targets are relative to the link's directory.
                path = salt.utils.path.join(os.path.dirname(path), link_tgt, use_posixpath=True)
            else:
                blob = file_blob
                if isinstance(blob, git.Tree):
                    # Path is a directory, not a file.
                    blob = None
                break
        except KeyError:
            # File not found or repo_path points to a directory
            blob = None
            break
    if isinstance(blob, git.Blob):
        return blob, blob.hexsha, blob.mode
    return None, None, None
|
def run(self, reset_current_buffer=False, pre_run=None):
    """Read input from the command line.

    This runs the eventloop until a return value has been set.

    :param reset_current_buffer: XXX: Not used anymore.
    :param pre_run: Callable that is called right after the reset has taken
        place. This allows custom initialisation.
    """
    assert pre_run is None or callable(pre_run)
    try:
        self._is_running = True
        self.on_start.fire()
        self.reset()
        # Call pre_run.
        self._pre_run(pre_run)
        # Run eventloop in raw mode.
        with self.input.raw_mode():
            self.renderer.request_absolute_cursor_position()
            self._redraw()
            self.eventloop.run(self.input, self.create_eventloop_callbacks())
    finally:
        # Clean up renderer. (This will leave the alternate screen, if we use
        # that.)
        # If exit/abort haven't been called, but another exception was
        # thrown instead for some reason, make sure that we redraw in exit
        # mode.
        if not self.is_done:
            self._exit_flag = True
            self._redraw()
        self.renderer.reset()
        self.on_stop.fire()
        self._is_running = False
    # Return result.
    return self.return_value()
|
def convert_md_to_rst(source, destination=None, backup_dir=None):
    """Try to convert the source, an .md (markdown) file, to an .rst
    (reStructuredText) file at the destination.

    If the destination isn't provided, it defaults to the source path with
    an .rst extension. Any pre-existing destination file is moved into a
    timestamped backup first; on conversion failure it is restored, so the
    destination is left untouched in the event of an error.
    """
    # Import lazily so a missing dependency fails at call time, not when the
    # module is evaluated.
    try:
        import pypandoc
    except ImportError:
        # Don't give up right away; first try to install the python module.
        os.system("pip install pypandoc")
        import pypandoc
    # Fill in defaults for the destination path and the backup directory.
    destination = destination if destination else os.path.splitext(source)[0] + '.rst'
    backup_dir = backup_dir if backup_dir else os.path.join(os.path.dirname(destination), 'bak')
    bak_name = os.path.basename(destination) + time.strftime('.%Y%m%d%H%M%S.bak')
    bak_path = os.path.join(backup_dir, bak_name)
    # If there's already a file at the destination path, move it out of the
    # way, but don't delete it.
    if os.path.isfile(destination):
        if not os.path.isdir(os.path.dirname(bak_path)):
            os.mkdir(os.path.dirname(bak_path))
        os.rename(destination, bak_path)
    try:
        # Try to convert the file.
        pypandoc.convert(source, 'rst', format='md', outputfile=destination)
    except:
        # If for any reason the conversion fails, try to put things back
        # like we found them, then re-raise.
        if os.path.isfile(destination):
            os.remove(destination)
        if os.path.isfile(bak_path):
            os.rename(bak_path, destination)
        raise
|
def altitude(msg):
    """Decode aircraft altitude from an airborne position message.

    Args:
        msg (string): 28 bytes hexadecimal message string

    Returns:
        int: altitude in feet, or None when the barometric altitude is
        unavailable (Q-bit clear is treated as not-decodable here).

    Raises:
        RuntimeError: if the message is not an airborne position message
            (typecode 9-18 or 20-22).
    """
    tc = common.typecode(msg)
    if tc < 9 or tc == 19 or tc > 22:
        raise RuntimeError("%s: Not a airborn position message" % msg)
    # ME field starts at bit 32 of the binary string.
    mb = common.hex2bin(msg)[32:]
    if tc < 19:
        # Barometric altitude (TC 9-18).
        q = mb[15]
        # BUGFIX: mb is a bit *string*, so the Q-bit is the character '0' or
        # '1' -- both truthy. The old "if q:" made the unavailable branch
        # unreachable; decode the bit value before testing it.
        if int(q):
            # Q-bit set: 25 ft resolution, offset -1000 ft.
            n = common.bin2int(mb[8:15] + mb[16:20])
            alt = n * 25 - 1000
        else:
            alt = None
    else:
        # GNSS altitude (TC 20-22), encoded in metres -> convert to feet.
        alt = common.bin2int(mb[8:20]) * 3.28084
    return alt
|
def get_subdomain_history(self, fqn, start_sequence=None, end_sequence=None, start_zonefile_index=None, end_zonefile_index=None, include_unaccepted=False, offset=None, count=None, cur=None):
    """Get the subdomain's history over a block range.

    By default, only include accepted history items (but set
    include_unaccepted=True to get them all).
    No zone files will be loaded.

    Returns the list of subdomain records ordered by parent zonefile index.
    NOTE(review): an earlier docstring also claimed ordering by sequence
    number, but the query only orders by parent_zonefile_index -- confirm
    which is intended.
    """
    # Build optional filter clauses; the placeholder order in the SQL must
    # match the order values are appended to `args` below.
    sql = 'SELECT * FROM {} WHERE fully_qualified_subdomain = ? {} {} {} {} {} ORDER BY parent_zonefile_index ASC'.format(self.subdomain_table, 'AND accepted=1' if not include_unaccepted else '', 'AND parent_zonefile_index >= ?' if start_zonefile_index is not None else '', 'AND parent_zonefile_index < ?' if end_zonefile_index is not None else '', 'AND sequence >= ?' if start_sequence is not None else '', 'AND sequence < ?' if end_sequence is not None else '')
    args = (fqn,)
    if start_zonefile_index is not None:
        args += (start_zonefile_index,)
    if end_zonefile_index is not None:
        args += (end_zonefile_index,)
    if start_sequence is not None:
        args += (start_sequence,)
    if end_sequence is not None:
        args += (end_sequence,)
    if count is not None:
        sql += ' LIMIT ?'
        args += (count,)
    if offset is not None:
        sql += ' OFFSET ?'
        args += (offset,)
    sql += ';'
    # Use the caller-supplied cursor when given (e.g. inside a transaction).
    if cur is None:
        cursor = self.conn.cursor()
    else:
        cursor = cur
    rowcursor = db_query_execute(cursor, sql, args)
    rows = []
    for rowdata in rowcursor:
        # want subdomain rec
        subrec = self._extract_subdomain(rowdata)
        rows.append(subrec)
    return rows
|
def create_model(model_name: Optional[str], params: ModelParams) -> 'Sequential':
    """Load a precise model from disk, or build a fresh one.

    Args:
        model_name: Name of model
        params: Parameters used to create the model

    Returns:
        model: Loaded Keras model
    """
    if model_name and isfile(model_name):
        print('Loading from ' + model_name + '...')
        model = load_precise_model(model_name)
    else:
        from keras.layers.core import Dense
        from keras.layers.recurrent import GRU
        from keras.models import Sequential
        # One recurrent layer feeding a single sigmoid output unit.
        model = Sequential()
        model.add(GRU(
            params.recurrent_units, activation='linear',
            input_shape=(pr.n_features, pr.feature_size),
            dropout=params.dropout, name='net'
        ))
        model.add(Dense(1, activation='sigmoid'))
    load_keras()
    metrics = ['accuracy'] + params.extra_metrics * [false_pos, false_neg]
    set_loss_bias(params.loss_bias)
    # Optionally freeze the first `freeze_till` layers.
    for frozen_layer in model.layers[:params.freeze_till]:
        frozen_layer.trainable = False
    model.compile('rmsprop', weighted_log_loss, metrics=(not params.skip_acc) * metrics)
    return model
|
def _execute_callback(async, callback):
    """Execute the given callback or insert the Async callback, or if no
    callback is given return the async.result.

    NOTE(review): ``async`` became a reserved keyword in Python 3.7; this
    code (and the ``furious.async`` module path) only runs on older
    interpreters.
    """
    from furious.async import Async
    if not callback:
        # No callback supplied: hand back the result payload directly.
        return async.result.payload
    if isinstance(callback, Async):
        # Async callbacks are inserted (started) rather than invoked inline.
        return callback.start()
    return callback()
|
def get_transactions(self):
    """Fetches transaction history.

    :rtype: ``list`` of ``str`` transaction IDs
    """
    # Replace the cached list contents in place so existing references to
    # self.transactions stay valid.
    self.transactions[:] = NetworkAPI.get_transactions(self.address)
    if self.segwit_address:
        self.transactions.extend(NetworkAPI.get_transactions(self.segwit_address))
    return self.transactions
|
def sense(self):
    """Return a situation, encoded as a bit string, which represents
    the observable state of the environment.

    Usage:
        situation = scenario.sense()
        assert isinstance(situation, BitString)

    Arguments: None
    Return:
        The current situation.
    """
    # address_size address bits followed by 2**address_size data bits.
    total_bits = self.address_size + (1 << self.address_size)
    random_bits = [random.randrange(2) for _ in range(total_bits)]
    self.current_situation = bitstrings.BitString(random_bits)
    return self.current_situation
|
def p_iteration_statement_2(self, p):
    """iteration_statement : WHILE LPAREN expr RPAREN statement"""
    # NOTE: the docstring above is the PLY grammar rule -- do not edit it.
    # p[3] is the loop condition, p[5] the loop body.
    p[0] = ast.While(predicate=p[3], statement=p[5])
|
def normalizeFeatureText(value):
    """Normalizes feature text.

    * **value** must be a :ref:`type-string`.
    * Returned value will be an unencoded ``unicode`` string.

    NOTE: uses the Python 2 ``basestring``/``unicode`` builtins.
    """
    if isinstance(value, basestring):
        return unicode(value)
    raise TypeError("Feature text must be a string, not %s." % type(value).__name__)
|
def get_immediate_children(self):
    """Return all direct subsidiaries of this company.

    Excludes subsidiaries of subsidiaries.
    """
    # Companies linked to this one through a direct Ownership record.
    direct_ownerships = Ownership.objects.filter(parent=self)
    return Company.objects.filter(child__in=direct_ownerships).distinct()
|
def importcsv(self):
    '''import data from csv

    Reads the bundled stock-number CSV and returns a dict mapping the
    first column to the UTF-8-decoded second column. Rows whose value
    fails to convert are skipped, except that a row keyed 'UPDATE' is
    stored on ``self.last_update`` instead -- presumably a metadata row;
    TODO confirm what actually raises ValueError for it.
    '''
    csv_path = os.path.join(os.path.dirname(__file__), self.stock_no_files)
    with open(csv_path) as csv_file:
        csv_data = csv.reader(csv_file)
        result = {}
        for i in csv_data:
            try:
                # NOTE(review): str(...).decode(...) is Python 2 only.
                result[i[0]] = str(i[1]).decode('utf-8')
            except ValueError:
                if i[0] == 'UPDATE':
                    self.last_update = str(i[1]).decode('utf-8')
                else:
                    pass
        return result
|
def is_owner(package, abspath):
    """Determine whether `abspath` belongs to `package`."""
    # A package without file metadata cannot own anything.
    if 'files' not in package or 'location' not in package:
        return False
    location = package['location']
    # Absolute paths of every file the package installed.
    candidates = (os.path.abspath(os.path.join(location, name)) for name in package['files'])
    return abspath in candidates
|
def get_outputs_by_public_key(self, public_key):
    """Get outputs for a public key"""
    links = []
    # Walk every owned transaction and keep the outputs whose condition
    # details reference this public key.
    for tx in list(query.get_owned_ids(self.connection, public_key)):
        for index, output in enumerate(tx['outputs']):
            if condition_details_has_owner(output['condition']['details'], public_key):
                links.append(TransactionLink(tx['id'], index))
    return links
|
def number_of_extents(self):
    """int: number of extents."""
    # Parse lazily on first access, then remember that we did.
    if not self._is_parsed:
        self._Parse()
        self._is_parsed = True
    return len(self._extents)
|
def output(data, **kwargs):
    '''Display the profiling data in a table format.'''
    duration_rows = _find_durations(data)
    # Tell the table outputter where to find its rows and labels.
    kwargs['opts'] = __opts__
    kwargs['rows_key'] = 'rows'
    kwargs['labels_key'] = 'labels'
    table_data = {
        'labels': ['name', 'mod.fun', 'duration (ms)'],
        'rows': duration_rows,
    }
    return table_out.output(table_data, **kwargs)
|
def GET(self):
    """Show page"""
    # Render the index with the current todo list and an empty form.
    todo_items = model.get_todos()
    todo_form = self.form()
    return render.index(todo_items, todo_form)
|
def _mount_avfs(self):
    """Mounts the AVFS filesystem.

    Creates a temporary AVFS mountpoint, mounts it via ``avfsd``, and
    symlinks the first source path (with the AVFS ``#`` suffix) into this
    mounter's mountpoint.

    Raises:
        MountpointEmptyError: if no raw path can be resolved afterwards.
    """
    self._paths['avfs'] = tempfile.mkdtemp(prefix='image_mounter_avfs_')
    # start by calling the mountavfs command to initialize avfs
    _util.check_call_(['avfsd', self._paths['avfs'], '-o', 'allow_other'], stdout=subprocess.PIPE)
    # no multifile support for avfs
    # The trailing '#' is AVFS syntax for "look inside this archive".
    avfspath = self._paths['avfs'] + '/' + os.path.abspath(self.paths[0]) + '#'
    targetraw = os.path.join(self.mountpoint, 'avfs')
    os.symlink(avfspath, targetraw)
    logger.debug("Symlinked {} with {}".format(avfspath, targetraw))
    raw_path = self.get_raw_path()
    logger.debug("Raw path to avfs is {}".format(raw_path))
    # Without a resolvable raw path the mount is unusable.
    if raw_path is None:
        raise MountpointEmptyError()
|
def compile_ir(engine, llvm_ir):
    """Compile the LLVM IR string with the given engine.

    The compiled module object is returned.
    """
    # Parse and validate the textual IR first.
    module = llvm.parse_assembly(llvm_ir)
    module.verify()
    # Hand the module to the engine and finish code generation.
    engine.add_module(module)
    engine.finalize_object()
    engine.run_static_constructors()
    return module
|
def types(self):
    '''Returns an iterator over the types of the neurites in the object.

    If the object is a tree, then one value is returned.
    '''
    # A bare tree has no .neurites attribute; treat it as a 1-tuple.
    if hasattr(self._obj, 'neurites'):
        neurites = self._obj.neurites
    else:
        neurites = (self._obj,)
    return (neurite.type for neurite in neurites)
|
def move_file_to_file(old_path, new_path):
    """Moves file from old location to new one.

    Tries a fast in-place rename first; if that fails (e.g. the target is
    on a different filesystem), falls back to moving the file into the
    target directory and renaming it there.

    :param old_path: path of file to move
    :param new_path: new path
    """
    try:
        os.rename(old_path, new_path)
    except OSError:
        # BUGFIX: was a bare ``except:``, which also diverted
        # KeyboardInterrupt/SystemExit into the fallback; os.rename only
        # raises OSError (and subclasses) for filesystem failures.
        old_file = os.path.basename(old_path)
        target_directory = os.path.dirname(os.path.abspath(new_path))
        target_file = os.path.basename(new_path)
        Document.move_file_to_directory(old_path, target_directory)
        # move old file to new directory, change name to new name
        os.rename(os.path.join(target_directory, old_file), os.path.join(target_directory, target_file))
|
def create_node(manager, name, meta_type_label, type_label, handle_id, legacy=True):
    """Creates a node with the mandatory attributes name and handle_id also sets type label.

    :param manager: Manager to handle sessions and transactions
    :param name: Node name
    :param meta_type_label: Node meta type
    :param type_label: Node label
    :param handle_id: Unique id
    :param legacy: Backwards compatibility

    :type manager: norduniclient.contextmanager.Neo4jDBSessionManager
    :type name: str|unicode
    :type meta_type_label: str|unicode
    :type type_label: str|unicode
    :type handle_id: str|unicode
    :type legacy: Boolean

    :rtype: dict|neo4j.v1.types.Node
    """
    if meta_type_label not in META_TYPES:
        raise exceptions.MetaLabelNamingError(meta_type_label)
    # Labels cannot be parameterized in Cypher, hence the %-interpolation;
    # meta_type_label is validated against META_TYPES above.
    q = """
    CREATE (n:Node:%s:%s { name: { name }, handle_id: { handle_id }})
    RETURN n
    """ % (meta_type_label, type_label)
    with manager.session as s:
        if legacy:
            # Legacy callers expect a plain properties dict.
            return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n'].properties
        return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n']
|
def set_ticks(self, max_xticks=_NTICKS, max_yticks=_NTICKS, fontsize=_FONTSIZE):
    """Set and control tick behavior

    Parameters
    ----------
    max_xticks, max_yticks : int, optional
        Maximum number of labeled ticks to plot on x, y axes
    fontsize : string or int
        Font size as used by matplotlib text

    Returns
    -------
    self : FacetGrid object
    """
    from matplotlib.ticker import MaxNLocator
    # Both are necessary
    x_locator = MaxNLocator(nbins=max_xticks)
    y_locator = MaxNLocator(nbins=max_yticks)
    for axis in self.axes.flat:
        axis.xaxis.set_major_locator(x_locator)
        axis.yaxis.set_major_locator(y_locator)
        every_tick = itertools.chain(axis.xaxis.get_major_ticks(),
                                     axis.yaxis.get_major_ticks())
        for tick in every_tick:
            tick.label.set_fontsize(fontsize)
    return self
|
def json_load_object_hook(dct):
    """Hook for json.parse(...) to parse Xero date formats."""
    # Replace any string value that parses as a date with the parsed value;
    # mutating values (not keys) during iteration is safe.
    for key, raw in dct.items():
        if not isinstance(raw, six.string_types):
            continue
        parsed = parse_date(raw)
        if parsed:
            dct[key] = parsed
    return dct
|
def resolve_path(path, expected=None, multi_projects=False, allow_empty_string=True):
    '''
    :param path: A path to a data object to attempt to resolve
    :type path: string
    :param expected: one of the following: "folder", "entity", or None
        to indicate whether the expected path is a folder, a data
        object, or either
    :type expected: string or None
    :returns: A tuple of 3 values: container_ID, folderpath, entity_name
    :rtype: string, string, string
    :raises: :exc:`ResolutionError` if the project cannot be resolved by
        name or the path is malformed
    :param allow_empty_string: If false, a ResolutionError will be
        raised if *path* is an empty string. Use this when resolving
        the empty string could result in unexpected behavior.
    :type allow_empty_string: boolean

    Attempts to resolve *path* to a project or container ID, a folder
    path, and a data object or folder name. This method will NOT
    raise an exception if the specified folder or object does not
    exist. This method is primarily for parsing purposes.

    Returns one of the following:

        (project, folder, maybe_name)

    where project is a container ID (non-null), folder is a folder path,
    and maybe_name is a string if the path could represent a folder or an
    object, or None if the path could only represent a folder; OR

        (maybe_project, None, object_id)

    where maybe_project is a container ID or None and object_id is a
    dataobject, app, or execution (specified by ID, not name); OR

        (job_id, None, output_name)

    where job_id and output_name are both non-null.
    '''
    # TODO: callers that intend to obtain a data object probably won't be happy
    # with an app or execution ID. Callers should probably have to specify
    # whether they are okay with getting an execution ID or not.
    # TODO: callers that are looking for a place to write data, rather than
    # read it, probably won't be happy with receiving an object ID, or a
    # JBOR. Callers should probably specify whether they are looking for an
    # "LHS" expression or not.
    if '_DX_FUSE' in os.environ:
        # Under the FUSE filesystem, the real project/object IDs live in
        # extended attributes of the file.
        from xattr import xattr
        path = xattr(path)['project'] + ":" + xattr(path)['id']
    if path == '' and not allow_empty_string:
        raise ResolutionError('Cannot parse ""; expected the path to be a non-empty string')
    path = _maybe_convert_stringified_dxlink(path)
    # Easy case: ":"
    if path == ':':
        if dxpy.WORKSPACE_ID is None:
            raise ResolutionError("Cannot resolve \":\": expected a project name or ID "
                                  "to the left of the colon, or for a current project to be set")
        return ([dxpy.WORKSPACE_ID] if multi_projects else dxpy.WORKSPACE_ID), '/', None
    # Second easy case: empty string
    if path == '':
        if dxpy.WORKSPACE_ID is None:
            raise ResolutionError('Expected a project name or ID to the left of a colon, '
                                  'or for a current project to be set')
        return ([dxpy.WORKSPACE_ID] if multi_projects else dxpy.WORKSPACE_ID), dxpy.config.get('DX_CLI_WD', '/'), None
    # Third easy case: hash ID
    if is_container_id(path):
        return ([path] if multi_projects else path), '/', None
    elif is_hashid(path):
        return ([dxpy.WORKSPACE_ID] if multi_projects else dxpy.WORKSPACE_ID), None, path
    # using a numerical sentinel value to indicate that it hasn't been
    # set in case dxpy.WORKSPACE_ID is actually None
    project = 0
    folderpath = None
    entity_name = None
    wd = dxpy.config.get('DX_CLI_WD', u'/')
    # Test for multiple colons
    last_colon = get_last_pos_of_char(':', path)
    if last_colon >= 0:
        last_last_colon = get_last_pos_of_char(':', path[:last_colon])
        if last_last_colon >= 0:
            raise ResolutionError('Cannot parse "' + path + '" as a path; at most one unescaped colon can be present')
    substrings = split_unescaped(':', path)
    if len(substrings) == 2:
        # One of the following:
        # 1) job-id:fieldname
        # 2) project-name-or-id:folderpath/to/possible/entity
        if is_job_id(substrings[0]):
            return ([substrings[0]] if multi_projects else substrings[0]), None, substrings[1]
        if multi_projects:
            project_ids = resolve_container_id_or_name(substrings[0], is_error=True, multi=True)
        else:
            project = resolve_container_id_or_name(substrings[0], is_error=True)
        wd = '/'
    elif get_last_pos_of_char(':', path) >= 0:
        # :folderpath/to/possible/entity OR project-name-or-id:
        # Colon is either at the beginning or at the end
        wd = '/'
        if path.startswith(':'):
            if dxpy.WORKSPACE_ID is None:
                raise ResolutionError('Cannot resolve "%s": expected a project name or ID to the left of the '
                                      'colon, or for a current project to be set' % (path,))
            project = dxpy.WORKSPACE_ID
        else:
            # One nonempty string to the left of a colon
            project = resolve_container_id_or_name(substrings[0], is_error=True)
            folderpath = '/'
    else:
        # One nonempty string, no colon present, do NOT interpret as
        # project
        project = dxpy.WORKSPACE_ID
        if project is None:
            raise ResolutionError('Cannot resolve "%s": expected the path to be qualified with a project name or ID, '
                                  'and a colon; or for a current project to be set' % (path,))
    # Determine folderpath and entity_name if necessary
    if folderpath is None:
        folderpath = substrings[-1]
        # Relative paths are resolved against the working directory `wd`.
        folderpath, entity_name = clean_folder_path(('' if folderpath.startswith('/') else wd + '/') + folderpath, expected)
    if multi_projects:
        # `project` still holding the 0 sentinel means project_ids was set above.
        return (project_ids if project == 0 else [project]), folderpath, entity_name
    else:
        return project, folderpath, entity_name
|
def update(self, settings):
    '''updates the internal dictionary

    Args:
        settings: parameters to be set; either a flat mapping, or a
        mapping with 'settings' / 'instruments' / 'scripts' sections
    '''
    # A wrapped {'settings': {...}} payload updates the same internal dict
    # as a flat payload does.
    payload = settings['settings'] if 'settings' in settings else settings
    self._settings.update(payload)
    if 'instruments' in settings:
        for instrument_name, instrument_setting in settings['instruments'].items():
            self.instruments[instrument_name]['settings'].update(instrument_setting['settings'])
    if 'scripts' in settings:
        for script_name, script_setting in settings['scripts'].items():
            self.scripts[script_name].update(script_setting)
|
def choose_ancestral_states_mppa(tree, feature, states, force_joint=True):
    """Chooses node ancestral states based on their marginal probabilities using MPPA method.

    :param force_joint: make sure that Joint state is chosen even if it has a low probability.
    :type force_joint: bool
    :param tree: tree of interest
    :type tree: ete3.Tree
    :param feature: character for which the ancestral states are to be chosen
    :type feature: str
    :param states: possible character states in order corresponding to the probabilities array
    :type states: numpy.array
    :return: tuple of (number of ancestral scenarios selected -- the product of
        the number of selected states over all nodes, number of unresolved nodes,
        total number of selected states). Also modifies the
        get_personalized_feature_name(feature, ALLOWED_STATES) feature of each
        node to only contain the selected states.
    :rtype: tuple
    """
    lh_feature = get_personalized_feature_name(feature, LH)
    allowed_state_feature = get_personalized_feature_name(feature, ALLOWED_STATES)
    joint_state_feature = get_personalized_feature_name(feature, JOINT_STATE)
    n = len(states)
    _, state2array = get_state2allowed_states(states, False)
    num_scenarios = 1
    unresolved_nodes = 0
    num_states = 0
    # If force_joint == True, we make sure that the joint state is always chosen:
    # we sort the marginal probabilities array as
    # [lowest_non_joint_mp, ..., highest_non_joint_mp, joint_mp],
    # select k in 1:n such that the correction between choosing
    # [0, ..., 0, 1/k, ..., 1/k] and our sorted array is minimal,
    # and return the corresponding states.
    for node in tree.traverse():
        marginal_likelihoods = getattr(node, lh_feature)
        marginal_probs = marginal_likelihoods / marginal_likelihoods.sum()
        if force_joint:
            joint_index = getattr(node, joint_state_feature)
            joint_prob = marginal_probs[joint_index]
            # Put the joint state's probability last so it is always inside
            # any chosen top-k suffix.
            marginal_probs = np.hstack((np.sort(np.delete(marginal_probs, joint_index)), [joint_prob]))
        else:
            marginal_probs = np.sort(marginal_probs)
        best_k = n
        best_correction = np.inf
        for k in range(1, n + 1):
            # Squared distance between the uniform top-k distribution and
            # the sorted marginal probabilities.
            correction = np.hstack((np.zeros(n - k), np.ones(k) / k)) - marginal_probs
            correction = correction.dot(correction)
            if correction < best_correction:
                best_correction = correction
                best_k = k
        num_scenarios *= best_k
        num_states += best_k
        if force_joint:
            # BUGFIX: the sort key previously compared ``n == joint_index``
            # (a constant for all candidates), so the joint state was not
            # actually forced into the selection. Compare the candidate
            # index ``_`` instead, placing the joint state first.
            indices_selected = sorted(range(n),
                                      key=lambda _: (0 if _ == joint_index else 1,
                                                     -marginal_likelihoods[_]))[:best_k]
        else:
            indices_selected = sorted(range(n), key=lambda _: -marginal_likelihoods[_])[:best_k]
        if best_k == 1:
            allowed_states = state2array[indices_selected[0]]
        else:
            # np.int is deprecated; the builtin int is the same dtype here.
            allowed_states = np.zeros(len(states), dtype=int)
            allowed_states[indices_selected] = 1
            unresolved_nodes += 1
        node.add_feature(allowed_state_feature, allowed_states)
    return num_scenarios, unresolved_nodes, num_states
|
def process_IN_CREATE(self, raw_event):
    """If the event affects a directory and the auto_add flag of the
    targetted watch is set to True, a new watch is added on this
    new directory, with the same attribute values than those of
    this watch.

    NOTE(review): this module uses Python 2 ``except X, e`` syntax.
    """
    if raw_event.mask & IN_ISDIR:
        watch_ = self._watch_manager.get_watch(raw_event.wd)
        created_dir = os.path.join(watch_.path, raw_event.name)
        if watch_.auto_add and not watch_.exclude_filter(created_dir):
            addw = self._watch_manager.add_watch
            # The newly monitored directory inherits attributes from its
            # parent directory.
            addw_ret = addw(created_dir, watch_.mask,
                            proc_fun=watch_.proc_fun, rec=False,
                            auto_add=watch_.auto_add,
                            exclude_filter=watch_.exclude_filter)
            # Trick to handle mkdir -p /d1/d2/t3 where d1 is watched and
            # d2 and t3 (directory or file) are created.
            # Since the directory d2 is new, then everything inside it must
            # also be new.
            created_dir_wd = addw_ret.get(created_dir)
            if ((created_dir_wd is not None) and (created_dir_wd > 0) and
                    os.path.isdir(created_dir)):
                try:
                    for name in os.listdir(created_dir):
                        inner = os.path.join(created_dir, name)
                        if self._watch_manager.get_wd(inner) is not None:
                            # Already watched (race with inotify); skip it.
                            continue
                        # Generate (simulate) creation events for sub-
                        # directories and files.
                        if os.path.isfile(inner):
                            # symlinks are handled as files.
                            flags = IN_CREATE
                        elif os.path.isdir(inner):
                            flags = IN_CREATE | IN_ISDIR
                        else:
                            # This path should not be taken.
                            continue
                        rawevent = _RawEvent(created_dir_wd, flags, 0, name)
                        self._notifier.append_event(rawevent)
                except OSError, err:
                    # The directory may have vanished between the checks above.
                    msg = "process_IN_CREATE, invalid directory %s: %s"
                    log.debug(msg % (created_dir, str(err)))
    return self.process_default(raw_event)
|
def unmarshal(self, v):
    """Convert the value from Strava API format to useful python representation.

    If the value does not appear in the choices attribute we log an error rather
    than raising an exception as this may be caused by a change to the API upstream
    so we want to fail gracefully.
    """
    if v in self.choices:
        return self.choices[v]
    self.log.warning("No such choice {0} for field {1}.".format(v, self))
    # Just return the value from the API
    return v
|
def list_media_endpoint_keys(access_token, subscription_id, rgname, msname):
    '''list the media endpoint keys in a media service

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        rgname (str): Azure resource group name.
        msname (str): Media service name.

    Returns:
        HTTP response. JSON body.
    '''
    # Assemble the ARM listKeys URL for this media service.
    endpoint = ''.join([
        get_rm_endpoint(),
        '/subscriptions/', subscription_id,
        '/resourceGroups/', rgname,
        '/providers/microsoft.media/',
        '/mediaservices/', msname,
        '/listKeys?api-version=', MEDIA_API,
    ])
    return do_get(endpoint, access_token)
|
def get_interpolated_value(self, energy):
    """Returns interpolated density for a particular energy.

    Args:
        energy: Energy to return the density for.
    """
    # One interpolated value per spin channel.
    return {
        spin: get_linear_interpolated_value(self.energies, spin_densities, energy)
        for spin, spin_densities in self.densities.items()
    }
|
def DefaultAdapter(self):
    '''Retrieve the default adapter'''
    # The last matching adapter object path wins, as in the original scan.
    chosen = None
    for object_path in mockobject.objects.keys():
        if object_path.startswith('/org/bluez/') and 'dev_' not in object_path:
            chosen = object_path
    if chosen:
        return dbus.ObjectPath(chosen, variant_level=1)
    raise dbus.exceptions.DBusException('No such adapter.', name='org.bluez.Error.NoSuchAdapter')
|
def _set_qsfp(self, v, load=False):
    """Setter method for qsfp, mapped from YANG variable /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp (container)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_qsfp is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_qsfp() directly.

    NOTE: pyangbind-generated code; the YANGDynClass arguments mirror the
    YANG model and should not be edited by hand.
    """
    # Unwrap typed values coming from the RPC layer.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=qsfp.qsfp, is_container='container', presence=False, yang_name="qsfp", rest_name="qsfp", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """qsfp must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=qsfp.qsfp, is_container='container', presence=False, yang_name="qsfp", rest_name="qsfp", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='container', is_config=True)""", })
    self.__qsfp = t
    # Notify the parent object of the change, if it supports it.
    if hasattr(self, '_set'):
        self._set()
|
def value(self, value):
    """Used to set the ``value`` of form elements."""
    # Fire-and-forget: set_field is dispatched without waiting for a reply.
    self.client.nowait('set_field', (Literal('browser'), self.element, value))
|
def match_operators(inp, relate, cut):
    """Compare two items. Match a string operator to an operator function

    :param str inp: Comparison item
    :param str relate: Comparison operator
    :param any cut: Comparison item
    :return bool truth: Comparison truth
    """
    logger_misc.info("enter match_operators")
    operator_map = {
        '>': operator.gt,
        '<': operator.lt,
        '>=': operator.ge,
        '<=': operator.le,
        '=': operator.eq,
    }
    try:
        result = operator_map[relate](inp, cut)
    except KeyError as bad_op:
        # Unknown operator string: log it and report a failed comparison.
        result = False
        logger_misc.warn("get_truth: KeyError: Invalid operator input: {}, {}".format(relate, bad_op))
    logger_misc.info("exit match_operators")
    return result
|
def connect_any_signal_changed(self, function):
    """Connects the "anything changed" signal for all of the tree to the
    specified function.

    Parameters
    ----------
    function
        Function to connect to this signal.
    """
    # loop over all top level parameters
    for i in range(self._widget.topLevelItemCount()):
        # make sure there is only one connection!
        try:
            self._widget.topLevelItem(i).param.sigTreeStateChanged.connect(function, type=_g.QtCore.Qt.UniqueConnection)
        except:
            # Qt raises when a UniqueConnection already exists; presumably
            # that is the only expected failure here -- TODO confirm.
            pass
    return self
|
def parse_url_rules(urls_fp):
    """URL rules from given fp.

    Each non-empty line is treated as a regular expression; returns a list
    of dicts holding the raw pattern ('str') and its compiled form ('re').
    """
    rules = []
    for raw_line in urls_fp:
        pattern = raw_line.strip()
        if not pattern:
            continue
        rules.append({'str': pattern, 're': re.compile(pattern)})
    return rules
|
def extract_references_from_wets(wet_files, metadata_dir, out_dir, tmp_dir=None):
    """Extract references from WET files into sharded output files.

    Args:
        wet_files: iterable of WET file paths or http(s) URLs; remote files
            are downloaded into ``tmp_dir`` first.
        metadata_dir: directory containing, per WET file, a JSON metadata
            file (same basename plus ``cc_utils.METADTA_SUFFIX``) that maps
            a record URL to the list of output shard ids it belongs to.
        out_dir: directory in which the sharded reference files are created.
        tmp_dir: scratch directory for downloads; defaults to the system
            temp dir when a remote file is first encountered.
    """
    # Setup output files
    shard_files = make_ref_shard_files(out_dir)
    num_refs = 0
    for i, wet_file in enumerate(wet_files):
        num_refs_in_wet = 0
        tf.logging.info("Processing file %d", i)
        # Read metadata file
        metadata_fname = os.path.join(metadata_dir, os.path.basename(wet_file)) + cc_utils.METADTA_SUFFIX
        with tf.gfile.Open(cc_utils.readahead(metadata_fname)) as f:
            wet_metadata = json.loads(f.read())
        if not wet_metadata:
            # No references in this WET file
            continue
        if wet_file.startswith("http"):
            # download
            if not tmp_dir:
                tmp_dir = tempfile.gettempdir()
            record_gen = cc_utils.wet_records_from_url(wet_file, tmp_dir)
        else:
            # local
            record_gen = cc_utils.wet_records_from_file_obj(cc_utils.gzip_memfile(wet_file), take_ownership=True)
        for wet_record in record_gen:
            shard_ids = wet_metadata.get(wet_record.url)
            if not shard_ids:
                # URL not in dataset
                continue
            # Serialize and write out; a record may belong to several shards.
            ex = _make_example_from_record(wet_record)
            ex_str = ex.SerializeToString()
            for shard_id in shard_ids:
                shard_files[shard_id].write(ex_str)
            num_refs += 1
            num_refs_in_wet += 1
        tf.logging.info("Wrote out %d references for this WET", num_refs_in_wet)
    tf.logging.info("Wrote out %d references total", num_refs)
    # Cleanup
    for shard_file in shard_files:
        shard_file.close()
|
def acquire_for(pid_dir, num_available=1, kill_signal=None):
    """Makes sure the process is only run once at the same time with the same name.

    Notice that since we check the process name, different parameters to the
    same command can spawn multiple processes at the same time, i.e. running
    "/usr/bin/my_process" does not prevent anyone from launching
    "/usr/bin/my_process --foo bar".

    :param pid_dir: directory where the pid file for this command is kept.
    :param num_available: how many simultaneous processes with this command
        line are allowed before acquisition fails.
    :param kill_signal: when given, this signal is sent to every pid already
        running the same command, and one extra slot is granted so the
        killing process itself can proceed.
    :return: True when the lock was acquired, False otherwise.
    """
    my_pid, my_cmd, pid_file = get_info(pid_dir)
    # Create a pid file if it does not exist
    try:
        os.mkdir(pid_dir)
        os.chmod(pid_dir, 0o777)
    except OSError as exc:
        # The directory already existing is fine; anything else is fatal.
        if exc.errno != errno.EEXIST:
            raise
        pass
    # Let variable "pids" be all pids who exist in the .pid-file who are still
    # about running the same command.
    pids = {pid for pid in _read_pids_file(pid_file) if getpcmd(pid) == my_cmd}
    if kill_signal is not None:
        for pid in pids:
            os.kill(pid, kill_signal)
        print('Sent kill signal to Pids: {}'.format(pids))
        # We allow for the killer to progress, yet we don't want these to stack
        # up! So we only allow it once.
        num_available += 1
    if len(pids) >= num_available:
        # We are already running under a different pid
        print('Pid(s) {} already running'.format(pids))
        if kill_signal is not None:
            print('Note: There have (probably) been 1 other "--take-lock"' ' process which continued to run! Probably no need to run' ' this one as well.')
        return False
    # Record ourselves in the pid file alongside the still-running peers.
    _write_pids_file(pid_file, pids | {my_pid})
    return True
|
def is_leap(year):
    """Leap year or not in the Gregorian calendar.

    A year is a leap year when it is divisible by 4 and, additionally,
    either not divisible by 100 or divisible by 400.

    :param int year: calendar year.
    :return bool: True for leap years, False otherwise. (The previous
        implementation used float ``math.fmod`` truthiness and could return
        a non-bool float such as ``24.0`` for common years' remainders.)
    """
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
|
def _move_to_top ( self , pos ) :
"""Move element at given position to top of queue ."""
|
if pos > 0 :
self . queue . rotate ( - pos )
item = self . queue . popleft ( )
self . queue . rotate ( pos )
self . queue . appendleft ( item )
|
def make_summaries(self):
    """Make and save summary csv files, each containing values from all cells.

    Delegates the work to ``save_summaries`` and keeps the resulting
    summary frame on the instance as ``self.summary_df``.
    """
    self.summary_df = save_summaries(self.frames, self.keys, self.selected_summaries, self.batch_dir, self.name)
    logger.debug("made and saved summaries")
|
def save(self, *args, **kwargs):
    """Create a new OrganizationUser linking the User matching the provided
    email address to the organization.

    If no matching User is found, the registration process is kicked off via
    the configured invitation backend, which creates the User so it can be
    linked. In either case a notification email is then sent.

    Raises:
        forms.ValidationError: when more than one user account matches the
            given email address.
    """
    try:
        user = get_user_model().objects.get(email__iexact=self.cleaned_data["email"])
    except get_user_model().MultipleObjectsReturned:
        raise forms.ValidationError(_("This email address has been used multiple times."))
    except get_user_model().DoesNotExist:
        # Unknown address: invite (and implicitly create) the user.
        user = invitation_backend().invite_by_email(self.cleaned_data["email"], **{"domain": get_current_site(self.request), "organization": self.organization, "sender": self.request.user, })
    # Send a notification email to this user to inform them that they
    # have been added to a new organization.
    invitation_backend().send_notification(user, **{"domain": get_current_site(self.request), "organization": self.organization, "sender": self.request.user, })
    return OrganizationUser.objects.create(user=user, organization=self.organization, is_admin=self.cleaned_data["is_admin"], )
|
def command_x(self, x, to=None):
    """Sends a character to the currently active element with Command
    pressed. This method takes care of pressing and releasing Command.
    """
    chord = [Keys.COMMAND, x, Keys.COMMAND]
    if to is not None:
        self.send_keys(to, chord)
    else:
        ActionChains(self.driver).send_keys(chord).perform()
|
def gps2dt(gps_week, gps_ms):
    """Convert a GPS week number and milliseconds-into-week to a datetime.

    The GPS epoch is 1980-01-06 00:00:00; leap seconds are not accounted
    for here.
    """
    epoch = datetime(1980, 1, 6)
    elapsed = timedelta(weeks=gps_week, milliseconds=gps_ms)
    return epoch + elapsed
|
def get_vm(access_token, subscription_id, resource_group, vm_name):
    '''Get virtual machine details.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        vm_name (str): Name of the virtual machine.

    Returns:
        HTTP response. JSON body of VM properties.
    '''
    endpoint = (
        get_rm_endpoint()
        + '/subscriptions/' + subscription_id
        + '/resourceGroups/' + resource_group
        + '/providers/Microsoft.Compute/virtualMachines/' + vm_name
        + '?api-version=' + COMP_API
    )
    return do_get(endpoint, access_token)
|
def clear_cache(self):
    """Clear the TTS cache, removing all cache files from disk.

    A no-op when caching is disabled.

    .. versionadded:: 1.6.0
    """
    if not self.use_cache:
        return
    self.log(u"Requested to clear TTS cache")
    self.cache.clear()
|
def _get_valididty(self):
    """Determines whether this AD Group is valid.

    Runs the configured ``VALID_GROUP_TEST`` search against the LDAP server
    and maps known failure modes to explanatory messages.

    :returns: True and "" if this group is valid or False and a string
        description with the reason why it isn't valid otherwise.
    :raises ImproperlyConfigured: when the server rejects the operation
        outright (most likely an anonymous bind being refused).
    """
    try:
        self.ldap_connection.search(search_base=self.VALID_GROUP_TEST['base_dn'], search_filter=self.VALID_GROUP_TEST['filter_string'], search_scope=self.VALID_GROUP_TEST['scope'], attributes=self.VALID_GROUP_TEST['attribute_list'])
    except LDAPOperationsErrorResult as error_message:
        # NOTE(review): `error_message[0]['info']` assumes the ldap3 result
        # exception is subscriptable -- TODO confirm against ldap3 version.
        raise ImproperlyConfigured("The LDAP server most-likely does not accept anonymous connections:" "\n\t{error}".format(error=error_message[0]['info']))
    except LDAPInvalidDNSyntaxResult:
        return False, "Invalid DN Syntax: {group_dn}".format(group_dn=self.group_dn)
    except LDAPNoSuchObjectResult:
        return False, "No such group: {group_dn}".format(group_dn=self.group_dn)
    except LDAPSizeLimitExceededResult:
        # The server refused to return the full member list in one search.
        return False, ("This group has too many children for ldap-groups to handle: " "{group_dn}".format(group_dn=self.group_dn))
    return True, ""
|
def _insert_tasks(tasks, queue, transactional=False, retry_transient_errors=True, retry_delay=RETRY_SLEEP_SECS):
    """Insert a batch of tasks into the specified queue.

    If an error occurs during insertion, split the batch and retry until
    they are successfully inserted.

    :param tasks: list of taskqueue tasks to insert.
    :param queue: name of the target task queue.
    :param transactional: passed through to ``Queue.add``; when True a
        raised exception guarantees no tasks were added.
    :param retry_transient_errors: when False, ``TransientError`` is
        re-raised instead of retried.
    :param retry_delay: seconds to sleep before retrying after a
        ``TransientError``.
    :return: the number of successfully inserted tasks.
    """
    from google.appengine.api import taskqueue
    if not tasks:
        return 0
    try:
        taskqueue.Queue(name=queue).add(tasks, transactional=transactional)
        return len(tasks)
    except (taskqueue.BadTaskStateError, taskqueue.TaskAlreadyExistsError, taskqueue.TombstonedTaskError):
        if len(tasks) <= 1:
            # Task has already been inserted, no reason to report an error here.
            return 0
        # If a list of more than one Tasks is given, a raised exception does
        # not guarantee that no tasks were added to the queue (unless
        # transactional is set to True). To determine which tasks were
        # successfully added when an exception is raised, check the
        # Task.was_enqueued property.
        reinsert = _tasks_to_reinsert(tasks, transactional)
        count = len(reinsert)
        inserted = len(tasks) - count
        # BUGFIX: use floor division so the slice indices stay ints under
        # Python 3 (`count / 2` yields a float there, raising TypeError).
        inserted += _insert_tasks(reinsert[:count // 2], queue, transactional, retry_transient_errors, retry_delay)
        inserted += _insert_tasks(reinsert[count // 2:], queue, transactional, retry_transient_errors, retry_delay)
        return inserted
    except taskqueue.TransientError:
        # Always re-raise for transactional insert, or if specified by
        # options.
        if transactional or not retry_transient_errors:
            raise
        reinsert = _tasks_to_reinsert(tasks, transactional)
        # Retry with a delay, and then let any errors re-raise.
        time.sleep(retry_delay)
        taskqueue.Queue(name=queue).add(reinsert, transactional=transactional)
        return len(tasks)
|
def create_hitor_calibration(output_filename, plot_pixel_calibrations=False):
    '''Generating HitOr calibration file (_calibration.h5) from raw data file and plotting of calibration data.

    Parameters
    ----------
    output_filename : string
        Input raw data file name (without the ``_calibration.h5`` suffix).
    plot_pixel_calibrations : bool, iterable
        If True, generating additional pixel calibration plots for a sample
        of pixels. If list of column and row tuples (from 1 to 80/336),
        plot the selected pixels.

    Returns
    -------
    nothing
    '''
    logging.info('Analyze HitOR calibration data and plot results of %s', output_filename)
    with AnalyzeRawData(raw_data_file=output_filename, create_pdf=True) as analyze_raw_data:
        # Interpret the raw data file
        analyze_raw_data.create_occupancy_hist = False  # too many scan parameters to do in ram histogramming
        analyze_raw_data.create_hit_table = True
        analyze_raw_data.create_tdc_hist = True
        analyze_raw_data.align_at_tdc = True  # align events at TDC words, first word of event has to be a tdc word
        analyze_raw_data.interpret_word_table()
        analyze_raw_data.interpreter.print_summary()
        analyze_raw_data.plot_histograms()
        n_injections = analyze_raw_data.n_injections  # use later
        meta_data = analyze_raw_data.out_file_h5.root.meta_data[:]
        scan_parameters_dict = get_scan_parameter(meta_data)
        inner_loop_parameter_values = scan_parameters_dict[next(reversed(scan_parameters_dict))]  # inner loop parameter name is unknown
        scan_parameter_names = scan_parameters_dict.keys()
        # col_row_combinations = get_unique_scan_parameter_combinations(analyze_raw_data.out_file_h5.root.meta_data[:], scan_parameters=('column', 'row'), scan_parameter_columns_only=True)
        meta_data_table_at_scan_parameter = get_unique_scan_parameter_combinations(meta_data, scan_parameters=scan_parameter_names)
        scan_parameter_values = get_scan_parameters_table_from_meta_data(meta_data_table_at_scan_parameter, scan_parameter_names)
        event_number_ranges = get_ranges_from_array(meta_data_table_at_scan_parameter['event_number'])
        event_ranges_per_parameter = np.column_stack((scan_parameter_values, event_number_ranges))
        if analyze_raw_data.out_file_h5.root.Hits.nrows == 0:
            raise AnalysisError("Found no hits.")
        hits = analyze_raw_data.out_file_h5.root.Hits[:]
        event_numbers = hits['event_number'].copy()  # create contigous array, otherwise np.searchsorted too slow, http://stackoverflow.com/questions/15139299/performance-of-numpy-searchsorted-is-poor-on-structured-arrays
        output_filename = os.path.splitext(output_filename)[0]
        with tb.open_file(output_filename + "_calibration.h5", mode="w") as calibration_data_file:
            logging.info('Create calibration')
            # result of the calibration is a histogram with col_index, row_index, plsrDAC value, mean discrete tot, rms discrete tot, mean tot from TDC, rms tot from TDC
            calibration_data = np.full(shape=(80, 336, len(inner_loop_parameter_values), 4), fill_value=np.nan, dtype='f4')
            progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=len(event_ranges_per_parameter), term_width=80)
            progress_bar.start()
            for index, (actual_scan_parameter_values, event_start, event_stop) in enumerate(event_ranges_per_parameter):
                if event_stop is None:  # happens for the last chunk
                    event_stop = hits[-1]['event_number'] + 1
                array_index = np.searchsorted(event_numbers, np.array([event_start, event_stop]))
                actual_hits = hits[array_index[0]:array_index[1]]
                for item_index, item in enumerate(scan_parameter_names):
                    if item == "column":
                        actual_col = actual_scan_parameter_values[item_index]
                    elif item == "row":
                        actual_row = actual_scan_parameter_values[item_index]
                    elif item == "PlsrDAC":
                        plser_dac = actual_scan_parameter_values[item_index]
                    else:
                        raise ValueError("Unknown scan parameter %s" % item)
                # Only pixel of actual column/row should be in the actual data chunk but since FIFO is not cleared for each scan step due to speed reasons and there might be noisy pixels this is not always the case
                n_wrong_pixel = np.count_nonzero(np.logical_or(actual_hits['column'] != actual_col, actual_hits['row'] != actual_row))
                if n_wrong_pixel != 0:
                    logging.warning('%d hit(s) from other pixels for scan parameters %s', n_wrong_pixel, ', '.join(['%s=%s' % (name, value) for (name, value) in zip(scan_parameter_names, actual_scan_parameter_values)]))
                # Only take data from selected pixel
                actual_hits = actual_hits[np.logical_and(actual_hits['column'] == actual_col, actual_hits['row'] == actual_row)]
                # only take hits from good events (one TDC word only, no error)
                actual_tdc_hits = actual_hits[(actual_hits['event_status'] & 0b0000111110011100) == 0b0000000100000000]
                # only take hits from good events for tot
                actual_tot_hits = actual_hits[(actual_hits['event_status'] & 0b0000100010011100) == 0b0000000000000000]
                tot, tdc = actual_tot_hits['tot'], actual_tdc_hits['TDC']
                if tdc.shape[0] < n_injections:
                    logging.info('%d of %d expected TDC hits for scan parameters %s', tdc.shape[0], n_injections, ', '.join(['%s=%s' % (name, value) for (name, value) in zip(scan_parameter_names, actual_scan_parameter_values)]))
                if tot.shape[0] < n_injections:
                    logging.info('%d of %d expected hits for scan parameters %s', tot.shape[0], n_injections, ', '.join(['%s=%s' % (name, value) for (name, value) in zip(scan_parameter_names, actual_scan_parameter_values)]))
                # translate the scan parameter value to an index for the result histogram
                inner_loop_scan_parameter_index = np.where(plser_dac == inner_loop_parameter_values)[0][0]
                # numpy mean and std return nan if array is empty
                calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 0] = np.mean(tot)
                calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 1] = np.mean(tdc)
                calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 2] = np.std(tot)
                calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 3] = np.std(tdc)
                progress_bar.update(index)
            progress_bar.finish()
            calibration_data_out = calibration_data_file.create_carray(calibration_data_file.root, name='HitOrCalibration', title='Hit OR calibration data', atom=tb.Atom.from_dtype(calibration_data.dtype), shape=calibration_data.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            calibration_data_out[:] = calibration_data
            calibration_data_out.attrs.dimensions = scan_parameter_names
            calibration_data_out.attrs.scan_parameter_values = inner_loop_parameter_values
            calibration_data_out.flush()
            # with PdfPages(output_filename + "_calibration.pdf") as output_pdf:
            plot_scurves(calibration_data[:, :, :, 0], inner_loop_parameter_values, "ToT calibration", "ToT", 15, "Charge [PlsrDAC]", filename=analyze_raw_data.output_pdf)
            plot_scurves(calibration_data[:, :, :, 1], inner_loop_parameter_values, "TDC calibration", "TDC [ns]", None, "Charge [PlsrDAC]", filename=analyze_raw_data.output_pdf)
            tot_mean_all_pix = np.nanmean(calibration_data[:, :, :, 0], axis=(0, 1))
            tot_error_all_pix = np.nanstd(calibration_data[:, :, :, 0], axis=(0, 1))
            tdc_mean_all_pix = np.nanmean(calibration_data[:, :, :, 1], axis=(0, 1))
            tdc_error_all_pix = np.nanstd(calibration_data[:, :, :, 1], axis=(0, 1))
            plot_tot_tdc_calibration(scan_parameters=inner_loop_parameter_values, tot_mean=tot_mean_all_pix, tot_error=tot_error_all_pix, tdc_mean=tdc_mean_all_pix, tdc_error=tdc_error_all_pix, filename=analyze_raw_data.output_pdf, title="Mean charge calibration of %d pixel(s)" % np.count_nonzero(~np.all(np.isnan(calibration_data[:, :, :, 0]), axis=2)))
            # plotting individual pixels
            if plot_pixel_calibrations is True:
                # selecting pixels with non-nan entries
                col_row_non_nan = np.nonzero(~np.all(np.isnan(calibration_data[:, :, :, 0]), axis=2))
                plot_pixel_calibrations = np.dstack(col_row_non_nan)[0]
            elif plot_pixel_calibrations is False:
                plot_pixel_calibrations = np.array([], dtype=np.int)
            else:  # assuming list of column/row tuples
                plot_pixel_calibrations = np.array(plot_pixel_calibrations) - 1
            # generate index array
            pixel_indices = np.arange(plot_pixel_calibrations.shape[0])
            plot_n_pixels = 10  # number of pixels at the beginning, center and end of the array
            np.random.seed(0)
            # select random pixels
            if pixel_indices.size - 2 * plot_n_pixels >= 0:
                random_pixel_indices = np.sort(np.random.choice(pixel_indices[plot_n_pixels:-plot_n_pixels], min(plot_n_pixels, pixel_indices.size - 2 * plot_n_pixels), replace=False))
            else:
                random_pixel_indices = np.array([], dtype=np.int)
            selected_pixel_indices = np.unique(np.hstack([pixel_indices[:plot_n_pixels], random_pixel_indices, pixel_indices[-plot_n_pixels:]]))
            # plotting individual pixels
            for (column, row) in plot_pixel_calibrations[selected_pixel_indices]:
                logging.info("Plotting charge calibration for pixel column " + str(column + 1) + " / row " + str(row + 1))
                tot_mean_single_pix = calibration_data[column, row, :, 0]
                tot_std_single_pix = calibration_data[column, row, :, 2]
                tdc_mean_single_pix = calibration_data[column, row, :, 1]
                tdc_std_single_pix = calibration_data[column, row, :, 3]
                plot_tot_tdc_calibration(scan_parameters=inner_loop_parameter_values, tot_mean=tot_mean_single_pix, tot_error=tot_std_single_pix, tdc_mean=tdc_mean_single_pix, tdc_error=tdc_std_single_pix, filename=analyze_raw_data.output_pdf, title="Charge calibration for pixel column " + str(column + 1) + " / row " + str(row + 1))
|
def mod_division_by_lists(list1, list2):
    """Element-wise modulo division of two lists, returned as a new list.

    >>> mod_division_by_lists([4, 5, 6], [1, 2, 3])
    [0, 1, 0]
    >>> mod_division_by_lists([3, 2], [1, 4])
    [0, 2]
    >>> mod_division_by_lists([90, 120], [50, 70])
    [40, 50]
    """
    # zip truncates at the shorter list, matching map(f, l1, l2) semantics.
    return [dividend % divisor for dividend, divisor in zip(list1, list2)]
|
def list_qos_policies(self, retrieve_all=True, **_params):
    """Fetches a list of all qos policies for a project.

    :param retrieve_all: passed through to the shared ``list`` helper;
        presumably controls whether pagination is followed -- TODO confirm.
    :param _params: filter parameters forwarded with the request.
    """
    # Pass filters in "params" argument to do_request
    return self.list('policies', self.qos_policies_path, retrieve_all, **_params)
|
def _load_attributes(self, mft_config, attrs_view):
    '''Loads all the attributes of an entry.

    Once executed, all the attributes should have been loaded in the
    attribute *attrs* instance attribute.

    Args:
        mft_config (:obj:`MFTConfig`) - An instance of MFTConfig, as this tells
            how the library will interpret data.
        attrs_view (memoryview(bytearray)) - A binary stream that starts at
            the first attribute until the end of the entry
    '''
    offset = 0
    load_attrs = mft_config.attribute_load_list
    # 0xFFFFFFFF marks the end of the attribute list.
    while (attrs_view[offset:offset + 4] != b'\xff\xff\xff\xff'):
        attr_type, attr_len, non_resident = _get_attr_info(attrs_view[offset:])
        if attr_type in load_attrs:
            # pass all the information to the attr, as we don't know how
            # much content the attribute has
            attr = Attribute.create_from_binary(non_resident, mft_config.load_dataruns, attrs_view[offset:])
            # Idiom fix: `not x is y` -> `x is not y` (identical behaviour).
            if attr.header.attr_type_id is not AttrTypes.DATA:
                # add an attribute
                self.attrs[attr.header.attr_type_id].append(attr)
            else:
                self._add_data_attribute(attr)
        offset += attr_len
|
def ini_dump_hook(cfg, text: bool = False):
    """Dumps all the data into a INI file.

    Every '_' in a top-level key name is replaced with a dot before writing,
    so underscores in section names are lossy. You have been warned.

    :param cfg: config object exposing ``config.dump()``, an open ``fd``
        and a ``reload()`` method (project type -- not visible here).
    :param text: when True, only build ``cfg.tmpini`` and return without
        writing to disk or reloading.
    """
    data = cfg.config.dump()
    # Load data back into the goddamned ini file.
    ndict = {}
    for key, item in data.items():
        ndict[key.replace('_', '.')] = item
    cfg.tmpini = configparser.ConfigParser()
    # BUGFIX: feed the dot-translated mapping to the parser; the original
    # passed the untranslated `data`, leaving `ndict` dead and the
    # documented '_' -> '.' replacement a no-op.
    cfg.tmpini.read_dict(ndict)
    if not text:
        cfg.tmpini.write(cfg.fd)
    else:
        return
    cfg.reload()
|
def contrib_phone(contrib_tag):
    """Given a contrib tag, look for a phone tag and return its text.

    :param contrib_tag: parsed contrib element (presumably a
        BeautifulSoup-style tag -- verify against raw_parser usage).
    :return: text of the first phone tag, or None when absent.
    """
    # Hoisted: the original called raw_parser.phone() twice for the same tag.
    phone_tags = raw_parser.phone(contrib_tag)
    if phone_tags:
        return first(phone_tags).text
    return None
|
def add_prefix(self, name, stmt):
    """Return `name` prepended with the correct prefix.

    An already-prefixed name has its prefix translated to the value
    obtained from `self.module_prefixes`. Unmodified `name` is returned
    if we are inside a global grouping.
    """
    if self.gg_level:
        return name
    prefix, sep, local_name = name.partition(":")
    if not sep:
        # Unprefixed: `local_name` is empty and `prefix` holds the whole
        # name; qualify it with the innermost prefix on the stack.
        return self.prefix_stack[-1] + ":" + prefix
    module_key = stmt.i_module.i_prefixes[prefix][0]
    return self.module_prefixes[module_key] + ":" + local_name
|
def value_to_bytes(self, obj, value, default_endianness=DEFAULT_ENDIANNESS):
    """Converts the given value to an appropriately encoded string of bytes that represents it.

    :param obj: The parent :class:`.PebblePacket` of this field
    :type obj: .PebblePacket
    :param value: The python value to serialise.
    :param default_endianness: The default endianness of the value. Used if ``endianness`` was not
        passed to the :class:`Field` constructor.
    :type default_endianness: str
    :return: The serialised value
    :rtype: bytes
    """
    # Fall back to the caller-supplied endianness when the field's own is unset/falsy.
    endianness = self.endianness if self.endianness else default_endianness
    fmt = str(endianness) + self.struct_format
    return struct.pack(fmt, value)
|
def Deserialize(self, reader):
    """Read serialized data from byte stream.

    Args:
        reader (neocore.IO.BinaryReader): reader to read byte data from
    """
    # Read order matters: name, then symbol, then decimals.
    raw_name = reader.ReadVarString()
    raw_symbol = reader.ReadVarString()
    self.name = raw_name.decode('utf-8')
    self.symbol = raw_symbol.decode('utf-8')
    self.decimals = reader.ReadUInt8()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.