signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def _loadData(self, data):
    """Populate this setting's attributes from a Plex XML response element.

    :param data: XML element whose ``attrib`` mapping carries the setting
        fields returned by the Plex server.
    """
    attrib = data.attrib
    self._setValue = None
    self.id = attrib.get('id')
    self.label = attrib.get('label')
    self.summary = attrib.get('summary')
    self.type = attrib.get('type')
    self.default = self._cast(attrib.get('default'))
    self.value = self._cast(attrib.get('value'))
    self.hidden = utils.cast(bool, attrib.get('hidden'))
    self.advanced = utils.cast(bool, attrib.get('advanced'))
    self.group = attrib.get('group')
    self.enumValues = self._getEnumValues(data)
|
def tsplit(string, delimiters):
    """Split ``string`` like ``str.split`` but on any of several delimiters.

    All delimiters after the first are collapsed onto the first one, then a
    single ``str.split`` is performed on it.
    """
    delimiters = tuple(delimiters)
    if not delimiters:
        return [string]
    primary = delimiters[0]
    for extra in delimiters[1:]:
        string = string.replace(extra, primary)
    return string.split(primary)
|
def get_topics(self):
    """:calls: `GET /repos/:owner/:repo/topics <https://developer.github.com/v3/repos/#list-all-topics-for-a-repository>`_
    :rtype: list of strings
    """
    # the topics endpoint is still behind a preview media type
    request_headers = {'Accept': Consts.mediaTypeTopicsPreview}
    headers, data = self._requester.requestJsonAndCheck(
        "GET", self.url + "/topics", headers=request_headers)
    return data['names']
|
def _import_plugins ( self ) :
"""Internal function , ensure all plugin packages are imported ."""
|
if self . detected :
return
# In some cases , plugin scanning may start during a request .
# Make sure there is only one thread scanning for plugins .
self . scanLock . acquire ( )
if self . detected :
return
# previous threaded released + completed
try :
import_apps_submodule ( "content_plugins" )
self . detected = True
finally :
self . scanLock . release ( )
|
def duplicates_removed(it, already_seen=()):
    """Return a list of the items of *it* with duplicates removed.

    Items already present in *already_seen* are skipped as well.  The order
    of first occurrence is preserved.
    """
    seen = set()
    result = []
    for item in it:
        if item not in seen and item not in already_seen:
            result.append(item)
            seen.add(item)
    return result
|
def do_verify(marfile, keyfiles=None):
    """Verify the MAR file.

    Checks that *marfile* is well formed and, when *keyfiles* is given, that
    its signature verifies against at least one of the supplied keys.
    Prints the outcome; returns True on success and calls ``sys.exit(1)`` on
    any failure (malformed file, bad keys, failed verification, or an
    unreadable file).
    """
    try:
        with open(marfile, 'rb') as f:
            with MarReader(f) as m:
                # Check various parts of the mar file
                # e.g. signature algorithms and additional block sections
                errors = m.get_errors()
                if errors:
                    print("File is not well formed: {}".format(errors))
                    sys.exit(1)
                if keyfiles:
                    try:
                        keys = get_keys(keyfiles, m.signature_type)
                    except ValueError as e:
                        print(e)
                        sys.exit(1)
                    # one valid signature among the supplied keys is enough
                    if any(m.verify(key) for key in keys):
                        print("Verification OK")
                        return True
                    else:
                        print("Verification failed")
                        sys.exit(1)
                else:
                    # no keys supplied: well-formedness alone counts as OK
                    print("Verification OK")
                    return True
    except Exception as e:
        # NOTE: SystemExit is not an Exception subclass, so the sys.exit(1)
        # calls above are not swallowed by this handler.
        print("Error opening or parsing file: {}".format(e))
        sys.exit(1)
|
def todict(self):
    """Return a dictionary fully representing the state of this object."""
    state = dict(
        index=self.index,
        seed=hb_encode(self.seed),
        n=self.n,
        root=hb_encode(self.root),
        hmac=hb_encode(self.hmac),
        timestamp=self.timestamp,
    )
    return state
|
def get_private_rooms(self, **kwargs):
    """Get a listing of all private rooms with their names and IDs."""
    request = GetPrivateRooms(settings=self.settings, **kwargs)
    return request.call(**kwargs)
|
def _delete_stale(self):
    """Delete files left in self._stale_files. Also delete their directories if empty.

    A file is only removed when its current hash still matches the hash
    recorded for it; otherwise it has been modified locally since download
    and a GrablibError is raised so the user can inspect the file manually.
    """
    for name, hash_ in self._stale_files.items():
        path = self.download_root.joinpath(name)
        if not path.exists():
            # already gone, nothing to clean up
            continue
        current_hash = self._path_hash(path)
        if current_hash == hash_:
            progress_logger.info('deleting: %s which is stale...', name)
            path.unlink()
            self._stale_deleted += 1
            # walk up the tree removing now-empty parent directories,
            # stopping at the download root or the first non-empty dir
            while True:
                path = path.parent
                if path == self.download_root or list(path.iterdir()):
                    break
                progress_logger.info('deleting: %s which is stale..', path.relative_to(self.download_root))
                path.rmdir()
        else:
            # hash mismatch: the file changed since it was downloaded,
            # so refuse to delete it automatically
            progress_logger.error('Not deleting "%s" which is in the lock file but not the definition '
                                  'file, however appears to have been modified since it was downloaded. '
                                  'Please check and delete the file manually.', name)
            raise GrablibError('stale file modified')
|
def write_metadata(self, symbol, metadata, prune_previous_version=True, **kwargs):
    """Write 'metadata' under the specified 'symbol' name to this library.

    The data will remain unchanged. A new version will be created.
    If the symbol is missing, it causes a write with empty data (None,
    pickled, can't append) and the supplied metadata.
    Returns a VersionedItem object only with a metadata element.
    Fast operation: Zero data/segment read/write operations.

    Parameters
    ----------
    symbol : `str`
        symbol name for the item
    metadata : `dict` or `None`
        dictionary of metadata to persist along with the symbol
    prune_previous_version : `bool`
        Removes previous (non-snapshotted) versions from the database.
        Default: True
    kwargs :
        passed through to the write handler (only used if symbol does not
        already exist or is deleted)

    Returns
    -------
    `VersionedItem`
        VersionedItem named tuple containing the metadata of the written
        symbol's version document in the store.
    """
    # Make a normal write with empty data and supplied metadata if symbol does not exist
    try:
        previous_version = self._read_metadata(symbol)
    except NoDataFoundException:
        return self.write(symbol, data=None, metadata=metadata,
                          prune_previous_version=prune_previous_version, **kwargs)
    # Reaching here means that data and/or metadata exist and we are set to update the metadata.
    # Atomically reserve the next version number for this symbol.
    new_version_num = self._version_nums.find_one_and_update(
        {'symbol': symbol}, {'$inc': {'version': 1}}, upsert=True, new=True)['version']
    # Populate the new version entry, preserving existing data, and updating with the supplied metadata
    version = {k: previous_version[k] for k in previous_version.keys() if k != 'parent'}  # don't copy snapshots
    version['_id'] = bson.ObjectId()
    version['version'] = new_version_num
    version['metadata'] = metadata
    # keep pointing at the same underlying data as the previous version
    version['base_version_id'] = previous_version.get('base_version_id', previous_version['_id'])
    return self._add_new_version_using_reference(symbol, version, previous_version, prune_previous_version)
|
def backoff(max_tries=constants.BACKOFF_DEFAULT_MAXTRIES,
            delay=constants.BACKOFF_DEFAULT_DELAY,
            factor=constants.BACKOFF_DEFAULT_FACTOR,
            exceptions=None):
    """Implements an exponential backoff decorator which will retry decorated
    function upon given exceptions. This implementation is based on
    `Retry <https://wiki.python.org/moin/PythonDecoratorLibrary#Retry>`_ from
    the *Python Decorator Library*.

    :param int max_tries: Number of tries before give up. Defaults to
        :const:`~escpos.constants.BACKOFF_DEFAULT_MAXTRIES`.
    :param int delay: Delay between retries (in seconds). Defaults to
        :const:`~escpos.constants.BACKOFF_DEFAULT_DELAY`.
    :param int factor: Multiply factor in which delay will be increased for
        the next retry. Defaults to
        :const:`~escpos.constants.BACKOFF_DEFAULT_FACTOR`.
    :param exceptions: Tuple of exception types to catch that triggers retry.
        Any exception not listed will break the decorator and retry routines
        will not run.
    :type exceptions: tuple[Exception]
    """
    from functools import wraps  # local import: keeps module-level deps untouched

    if max_tries <= 0:
        raise ValueError('Max tries must be greater than 0; got {!r}'.format(max_tries))
    if delay <= 0:
        raise ValueError('Delay must be greater than 0; got {!r}'.format(delay))
    if factor <= 1:
        raise ValueError('Backoff factor must be greater than 1; got {!r}'.format(factor))
    if exceptions is None:
        # BUG FIX: "except None" raised a confusing TypeError at the first
        # failure. An empty tuple matches nothing, so with no retryable
        # exceptions configured, errors now propagate immediately instead.
        exceptions = ()

    def outter(f):
        @wraps(f)  # preserve the wrapped function's name/docstring
        def inner(*args, **kwargs):
            m_max_tries, m_delay = max_tries, delay  # make mutable copies
            while m_max_tries > 0:
                try:
                    retval = f(*args, **kwargs)
                except exceptions:
                    logger.exception('backoff retry for: %r (max_tries=%r, delay=%r, '
                                     'factor=%r, exceptions=%r)',
                                     f, max_tries, delay, factor, exceptions)
                    m_max_tries -= 1  # consume an attempt
                    if m_max_tries <= 0:
                        raise  # run out of tries
                    time.sleep(m_delay)  # wait...
                    m_delay *= factor  # make future wait longer
                else:
                    # we're done without errors
                    return retval
        return inner
    return outter
|
def do_page_truncate(self, args: List[str]):
    """Read in a text file and display its output in a pager, truncating long lines if they don't fit.

    Truncated lines can still be accessed by scrolling to the right using the arrow keys.

    Usage: page_truncate <file_path>
    """
    if not args:
        # the error message doubles as usage help
        self.perror('page_truncate requires a path to a file as an argument', traceback_war=False)
        return
    # chop=True asks the pager to truncate rather than wrap long lines
    self.page_file(args[0], chop=True)
|
def corr_flat_und(a1, a2):
    '''Returns the correlation coefficient between two flattened adjacency
    matrices. Only the upper triangular part is used to avoid double counting
    undirected matrices. Similarity metric for weighted matrices.

    Parameters
    ----------
    A1 : NxN np.ndarray
        undirected matrix 1
    A2 : NxN np.ndarray
        undirected matrix 2

    Returns
    -------
    r : float
        Correlation coefficient describing edgewise similarity of a1 and a2
    '''
    n = len(a1)
    if len(a2) != n:
        raise BCTParamError("Cannot calculate flattened correlation on "
                            "matrices of different size")
    # indices of the strict upper triangle (k=1 excludes the diagonal)
    upper = np.triu_indices(n, 1)
    return np.corrcoef(a1[upper].flat, a2[upper].flat)[0][1]
|
def norm(self, order=2):
    """Find the vector norm, with the given order, of the values."""
    magnitudes = abs(self).values()
    total = sum(value ** order for value in magnitudes)
    return total ** (1 / order)
|
def load_label(self, idx):
    """Load label image as 1 x height x width integer array of label indices.

    The leading singleton dimension is required by the loss. The full 400
    labels are translated to the 59 class task labels.

    :param idx: identifier of the .mat file under ``{context_dir}/trainval``
    :return: np.uint8 array with a leading singleton dimension; pixels of
        classes not in labels_59 stay 0
    """
    label_400 = scipy.io.loadmat('{}/trainval/{}.mat'.format(self.context_dir, idx))['LabelMap']
    label = np.zeros_like(label_400, dtype=np.uint8)
    # FIX: the loop variable used to be named `idx`, shadowing the `idx`
    # parameter after its last use — confusing and fragile under edits.
    for class_idx, class_label in enumerate(self.labels_59):
        idx_400 = self.labels_400.index(class_label) + 1
        # 59-class task labels are 1-based; 0 is left for unlabelled pixels
        label[label_400 == idx_400] = class_idx + 1
    label = label[np.newaxis, ...]
    return label
|
def tweak(environment, opts):
    """Commands operating on environment data

    Usage:
      datacats tweak --install-postgis [ENVIRONMENT]
      datacats tweak --add-redis [ENVIRONMENT]
      datacats tweak --admin-password [ENVIRONMENT]

    Options:
      --install-postgis     Install postgis in ckan database
      --add-redis           Adds redis next time this environment reloads
      -s --site=NAME        Choose a site to tweak [default: primary]
      -p --admin-password   Prompt to change the admin password

    ENVIRONMENT may be an environment name or a path to an environment directory.
    Default: '.'
    """
    environment.require_data()
    if opts['--install-postgis']:
        # FIX: was a Python 2 print *statement* (a SyntaxError on Python 3,
        # and inconsistent with the print() call below)
        print("Installing postgis")
        environment.install_postgis_sql()
    if opts['--add-redis']:
        # Let the user know if they are trying to add it and it is already there
        print('Adding redis extra container... Please note that you will have '
              'to reload your environment for these changes to take effect ("datacats reload {}")'
              .format(environment.name))
        environment.add_extra_container('redis', error_on_exists=True)
    if opts['--admin-password']:
        environment.create_admin_set_password(confirm_password())
|
def mrca_matrix(self):
    '''Return a dictionary storing all pairwise MRCAs. ``M[u][v]`` = MRCA of nodes ``u`` and ``v``. Excludes ``M[u][u]`` because MRCA of node and itself is itself

    Returns:
        ``dict``: ``M[u][v]`` = MRCA of nodes ``u`` and ``v``
    '''
    M = dict()
    # leaves_below[node] accumulates every leaf of the subtree rooted at node
    leaves_below = dict()
    for node in self.traverse_postorder():
        leaves_below[node] = list()
        if node.is_leaf():
            leaves_below[node].append(node); M[node] = dict()
        else:
            # any pair of leaves drawn from two *different* children of this
            # node has this node as its MRCA
            for i in range(len(node.children) - 1):
                for l1 in leaves_below[node.children[i]]:
                    leaves_below[node].append(l1)
                    for j in range(i + 1, len(node.children)):
                        for l2 in leaves_below[node.children[j]]:
                            M[l1][l2] = node; M[l2][l1] = node
            # the loop above appended leaves of all children except the last;
            # add them here (a single-child node already got them via l1)
            if len(node.children) != 1:
                for l2 in leaves_below[node.children[-1]]:
                    leaves_below[node].append(l2)
    return M
|
def get_class(classname, all=False):
    """Retrieve a class from the registry.

    :raises: marshmallow.exceptions.RegistryError if the class cannot be found
        or if there are multiple entries for the given class name.
    """
    try:
        classes = _registry[classname]
    except KeyError:
        raise RegistryError('Class with name {!r} was not found. You may need '
                            'to import the class.'.format(classname))
    if len(classes) > 1:
        # ambiguous short name: return all entries only when explicitly asked
        if all:
            return classes
        raise RegistryError('Multiple classes with name {!r} '
                            'were found. Please use the full, '
                            'module-qualified path.'.format(classname))
    return classes[0]
|
def fetch_bookshelf(start_url, output_dir):
    """Fetch all the books off of a gutenberg project bookshelf page.

    example bookshelf page,
    http://www.gutenberg.org/wiki/Children%27s_Fiction_(Bookshelf)
    """
    # make output directory
    # FIX: the old try/except simply re-raised the caught OSError, so the
    # wrapper added nothing — let it propagate naturally.
    os.mkdir(OUTPUT_DIR + output_dir)
    # fetch page
    r = requests.get(start_url)
    # extract links
    soup = bs(r.text, 'html.parser')
    book_links = soup.find_all(class_=re.compile("extiw"))
    new_links = []
    for el in book_links:
        link = el['href']
        title = el.text
        bookid = link.split('/')[-1]
        # keep only interwiki links whose last path element is a numeric book id
        if bookid.isdigit():
            new_links.append([title, NEW_LINK_BASE.format(bookid, bookid)])
    # save links as books
    for title, book_url in new_links:
        time.sleep(.10)
        # be nice to project gutenberg
        r1 = requests.get(book_url)
        # build a safe filename: lowercase, spaces/newlines to dashes, then
        # keep only [a-z-] (FIX: single-pass join instead of quadratic +=)
        slug = title.lower().replace(' ', '-').replace('\n', '-')
        slug = ''.join(c for c in slug if c in 'abcdefghijklmnopqrstuvwxyz-')
        new_filename = slug[:MAX_FILENAME_LEN] + '.txt'
        with open(OUTPUT_DIR + output_dir + '/' + new_filename, 'w+') as output_file:
            output_file.write(r1.text)
    return None
|
def env_to_statement(env):
    '''Return the abstraction description of an environment variable definition
    into a statement for shell script.

    The precedence is: ``file`` (sourced), then ``execute`` (verbatim
    command), then a ``name``/``value`` export.  ``raw=True`` suppresses
    quoting/escaping of the value (handled by ``__escape``).

    >>> env_to_statement(dict(name='X', value='Y'))
    'X="Y"; export X'
    >>> env_to_statement(dict(name='X', value='Y', raw=True))
    'X=Y; export X'
    >>> env_to_statement(dict(file="'RAW $FILE'", raw=True))
    ". 'RAW $FILE'"
    >>> env_to_statement(dict(execute="module load java/1.5.1"))
    'module load java/1.5.1'
    '''
    # a source file takes precedence over name/value pairs
    source_file = env.get('file', None)
    if source_file:
        return '. %s' % __escape(source_file, env)
    # an explicit command is emitted verbatim, no escaping
    execute = env.get('execute', None)
    if execute:
        return execute
    name = env['name']
    value = __escape(env['value'], env)
    return '%s=%s; export %s' % (name, value, name)
|
def remove_markup(s):
    """Remove all <*> html markup tags from s.

    Tags are removed one at a time and the string re-scanned, so markup
    formed by the removal of an inner tag is removed as well.
    """
    while True:
        match = _markup_re.search(s)
        if match is None:
            return s
        s = s[:match.start()] + s[match.end():]
|
def padding(s, bs=AES.block_size):
    """Fills a bytes-like object with arbitrary symbols to make its length divisible by `bs`.

    The filler consists of random lowercase letters/digits followed by one
    final marker byte derived from chr(96 +/- offset).
    NOTE(review): the marker byte presumably encodes how much padding was
    added so a matching unpad routine can strip it — confirm against the
    companion function before changing any of the chr() arithmetic.
    """
    s = to_bytes(s)
    if len(s) % bs == 0:
        # already aligned: append a full extra block (bs-1 filler + marker)
        res = s + b''.join(map(to_bytes, [random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(bs - 1)])) + to_bytes(chr(96 - bs))
    elif len(s) % bs > 0 and len(s) > bs:
        # longer than one block: pad the trailing partial block up to bs
        res = s + b''.join(map(to_bytes, [random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(bs - len(s) % bs - 1)])) + to_bytes(chr(96 + len(s) % bs - bs))
    else:
        # shorter than one block: pad up to a single full block
        res = s + b''.join(map(to_bytes, [random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(bs - len(s) - 1)])) + to_bytes(chr(96 + len(s) - bs))
    return res
|
def _resume_with_session_id(self, server_info: ServerConnectivityInfo, ssl_version_to_use: OpenSslVersionEnum) -> bool:
    """Perform one session resumption using Session IDs.

    Returns True only when the server assigned a non-empty session ID on the
    first handshake and presented the identical ID after resumption.
    """
    first_session = self._resume_ssl_session(server_info, ssl_version_to_use)
    try:
        first_id = self._extract_session_id(first_session)
    except IndexError:
        # Session ID not assigned
        return False
    if first_id == '':
        # Session ID empty
        return False
    # Try to resume that SSL session
    second_session = self._resume_ssl_session(server_info, ssl_version_to_use, first_session)
    try:
        second_id = self._extract_session_id(second_session)
    except IndexError:
        # Session ID not assigned
        return False
    # Session ID assigned but not accepted if the two IDs differ
    return first_id == second_id
|
def followers(self):
    """Fetch the users following this question.

    :return: generator over the users following this question
    :rtype: Author.Iterable
    :note: if other users start following the question while this generator
        is being consumed, some users may be yielded more than once
    """
    self._make_soup()
    followers_url = self.url + 'followers'
    # delegate pagination and parsing to the shared follower helper
    for x in common_follower(followers_url, self.xsrf, self._session):
        yield x
|
def _patch_multiple(target, spec=None, create=False, spec_set=None, autospec=None, new_callable=None, **kwargs):
    """Perform multiple patches in a single call. It takes the object to be
    patched (either as an object or a string to fetch the object by importing)
    and keyword arguments for the patches::

        with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'):

    Use `DEFAULT` as the value if you want `patch.multiple` to create
    mocks for you. In this case the created mocks are passed into a decorated
    function by keyword, and a dictionary is returned when `patch.multiple` is
    used as a context manager.

    `patch.multiple` can be used as a decorator, class decorator or a context
    manager. The arguments `spec`, `spec_set`, `create`,
    `autospec` and `new_callable` have the same meaning as for `patch`. These
    arguments will be applied to *all* patches done by `patch.multiple`.

    When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX`
    for choosing which methods to wrap.
    """
    # a string target is resolved lazily, only when the patch is entered
    # (note: `unicode` implies this module still supports Python 2)
    if type(target) in (unicode, str):
        getter = lambda: _importer(target)
    else:
        getter = lambda: target
    if not kwargs:
        raise ValueError('Must supply at least one keyword argument with patch.multiple')
    # need to wrap in a list for python 3, where items is a view
    items = list(kwargs.items())
    attribute, new = items[0]
    # the first patcher is the "primary" one; the remaining attributes ride
    # along in additional_patchers so they are started/stopped together
    patcher = _patch(getter, attribute, new, spec, create, spec_set, autospec, new_callable, {})
    patcher.attribute_name = attribute
    for attribute, new in items[1:]:
        this_patcher = _patch(getter, attribute, new, spec, create, spec_set, autospec, new_callable, {})
        this_patcher.attribute_name = attribute
        patcher.additional_patchers.append(this_patcher)
    return patcher
|
def set_server_handler(self, handler_func, name=None, header_filter=None, alias=None, interval=0.5):
    """Sets an automatic handler for the type of message template currently loaded.

    This feature allows users to set a python handler function which is called
    automatically by the Rammbock message queue when a message matches the
    expected template. The optional `name` argument defines the server node to
    which the handler will be bound; otherwise the default server will be used.

    The `header_filter` defines which header field will be used to identify
    the message defined in the template. (Otherwise all incoming messages will
    match!)

    The `interval` defines the interval in seconds on which the handler will
    be called in the background. By default the incoming messages are checked
    every 0.5 seconds.

    The `alias` is the alias for the connection. By default the currently
    active connection will be used.

    The handler function will be called with two arguments: the rammbock
    library instance and the received message.

    Example:
    | Load template      | SomeMessage |
    | Set server handler | my_module.respond_to_sample | messageType |

    my_module.py:
    | def respond_to_sample(rammbock, msg):
    |     rammbock.save_template("__backup_template", unlocked=True)
    |     try:
    |         rammbock.load_template("sample response")
    |         rammbock.server_sends_message()
    |     finally:
    |         rammbock.load_template("__backup_template")
    """
    msg_template = self._get_message_template()
    # get_with_name returns (server, resolved_name); the resolved name is
    # not needed here (FIX: dropped the unused `server_name` local)
    server, _ = self._servers.get_with_name(name)
    server.set_handler(msg_template, handler_func, header_filter=header_filter, alias=alias, interval=interval)
|
def parse_text(file_name):
    """Parse data from Ohio State University text mocap files
    (http://accad.osu.edu/research/mocap/mocap_data.htm).

    :param file_name: path to the whitespace-separated text export
    :return: tuple (points, point_names, times) — points is a list of three
        arrays (x, y and z columns), point_names are the marker names from
        the header and times is the per-frame time column
    """
    # Read the header.
    # FIX: use a context manager so the handle is closed even on error.
    with open(file_name, 'r') as fid:
        point_names = np.array(fid.readline().split())[2:-1:3]
    # strip the trailing two characters of each header entry (the axis
    # suffix — assumes "<name>:x"-style names, per the OSU export format)
    for i in range(len(point_names)):
        point_names[i] = point_names[i][0:-2]
    # Read the matrix data.
    # Column 0 is the frame index (previously stored in an unused `field`
    # local, now dropped); column 1 is the time stamp.
    S = np.loadtxt(file_name, skiprows=1)
    times = S[:, 1]
    S = S[:, 2:]
    # Set the -9999.99 markers to be not present.
    # FIX: np.NaN was removed in NumPy 2.0; np.nan works everywhere.
    S[S == -9999.99] = np.nan
    # Store x, y and z in different arrays
    points = []
    points.append(S[:, 0:-1:3])
    points.append(S[:, 1:-1:3])
    points.append(S[:, 2:-1:3])
    return points, point_names, times
|
def toProtocolElement(self):
    """Returns the representation of this CallSet as the corresponding
    ProtocolElement."""
    variantSet = self.getParentContainer()
    gaCallSet = protocol.CallSet(biosample_id=self.getBiosampleId())
    # creation/update times are optional on the parent variant set,
    # so only copy them across when set
    if variantSet.getCreationTime():
        gaCallSet.created = variantSet.getCreationTime()
    if variantSet.getUpdatedTime():
        gaCallSet.updated = variantSet.getUpdatedTime()
    gaCallSet.id = self.getId()
    gaCallSet.name = self.getLocalId()
    # this call set belongs to its single parent variant set
    gaCallSet.variant_set_ids.append(variantSet.getId())
    # copy any extra key/value attributes onto the protocol object
    self.serializeAttributes(gaCallSet)
    return gaCallSet
|
def rescale_variables(df, variables_include=None, variables_exclude=None):
    """Rescale variables in a DataFrame with MinMaxScaler.

    Columns containing NaNs, and object/datetime/timedelta columns, are
    excluded automatically; `variables_exclude` adds further exclusions and
    `variables_include` forces columns back in.

    FIX: the previous version used mutable default arguments ([]) and
    extended the caller's `variables_exclude` list in place, so exclusions
    leaked across calls; both lists are now copied.

    :param df: DataFrame to rescale (modified in place and also returned)
    :param variables_include: columns to always rescale
    :param variables_exclude: columns to never rescale
    :return: the (mutated) DataFrame
    """
    variables_include = list(variables_include) if variables_include is not None else []
    variables_not_rescale = list(variables_exclude) if variables_exclude is not None else []
    # variables with NaNs
    variables_not_rescale.extend(df.columns[df.isna().any()].tolist())
    # variables with strings or date/time types
    variables_not_rescale.extend(df.select_dtypes(include=["object", "datetime", "timedelta"]).columns)
    variables_rescale = list(set(df.columns) - set(variables_not_rescale))
    variables_rescale.extend(variables_include)
    scaler = MinMaxScaler()
    # NOTE: mutates df in place and returns it (original behavior)
    df[variables_rescale] = scaler.fit_transform(df[variables_rescale])
    return df
|
def call_filter(self, name, value, args=None, kwargs=None, context=None, eval_ctx=None):
    """Invokes a filter on a value the same way the compiler does it.

    Note that on Python 3 this might return a coroutine in case the
    filter is running from an environment in async mode and the filter
    supports async execution. It's your responsibility to await this
    if needed.

    .. versionadded:: 2.7
    """
    func = self.filters.get(name)
    if func is None:
        fail_for_missing_callable('no filter named %r', name)
    # the filtered value is always the first positional argument
    args = [value] + list(args or ())
    if getattr(func, 'contextfilter', False):
        # context filters require the active template context prepended
        if context is None:
            raise TemplateRuntimeError('Attempted to invoke context '
                                       'filter without context')
        args.insert(0, context)
    elif getattr(func, 'evalcontextfilter', False):
        # eval-context filters: reuse the template context's eval context
        # when available, otherwise build a fresh one for this environment
        if eval_ctx is None:
            if context is not None:
                eval_ctx = context.eval_ctx
            else:
                eval_ctx = EvalContext(self)
        args.insert(0, eval_ctx)
    elif getattr(func, 'environmentfilter', False):
        # environment filters receive the environment itself
        args.insert(0, self)
    return func(*args, **(kwargs or {}))
|
def process_post(self, post, render=True):
    """A high level view to create post processing.

    :param post: Dictionary representing the post
    :type post: dict
    :param render: Choice if the markdown text has to be converted or not
    :type render: bool
    :return:
    """
    post_processor = self.post_processor
    post_processor.process(post, render)
    try:
        author = self.user_callback(post["user_id"])
    except Exception as e:
        # FIX: chain the original error ("from e") so a genuine failure
        # inside a registered user_loader is not silently masked by this
        # generic "no user_loader" message.
        raise Exception("No user_loader has been installed for this "
                        "BloggingEngine. Add one with the "
                        "'BloggingEngine.user_loader' decorator.") from e
    if author is not None:
        post["user_name"] = self.get_user_name(author)
    # notify listeners that the post has been fully processed
    post_processed.send(self.app, engine=self, post=post, render=render)
|
def fbresnet152(num_classes=1000, pretrained='imagenet'):
    """Constructs a ResNet-152 model.

    Args:
        num_classes (int): number of output classes; must match the
            pretrained settings when weights are loaded
        pretrained (str or None): key into ``pretrained_settings['fbresnet152']``
            (e.g. 'imagenet'), or None to skip loading pretrained weights
    """
    model = FBResNet(Bottleneck, [3, 8, 36, 3], num_classes=num_classes)
    if pretrained is not None:
        settings = pretrained_settings['fbresnet152'][pretrained]
        assert num_classes == settings['num_classes'], "num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
        model.load_state_dict(model_zoo.load_url(settings['url']))
        # expose input metadata so callers can build matching preprocessing
        model.input_space = settings['input_space']
        model.input_size = settings['input_size']
        model.input_range = settings['input_range']
        model.mean = settings['mean']
        model.std = settings['std']
    return model
|
def set_nweight(self, node_from, node_to, weight_there, weight_back):
    r"""Set a single n-weight / edge-weight between two nodes.

    Parameters
    ----------
    node_from : int
        Node-id of the first node of the edge.
    node_to : int
        Node-id of the second node of the edge.
    weight_there : float
        Weight from first to second node (> 0).
    weight_back : float
        Weight from second to first node (> 0).

    Raises
    ------
    ValueError
        If either node id is out of range, if the edge would be a
        self-connection (not allowed in graph cuts), or if a weight is
        not strictly positive.

    Notes
    -----
    The object does not check whether the total number of supplied edges
    exceeds the number passed to the init-method; if it does, the
    underlying C++ implementation doubles its memory, which is very
    inefficient. The C++ layer would accept zero weights, but they are
    highly undesirable for inter-node weights and are rejected here.
    """
    if not 0 <= node_from < self.__nodes:
        raise ValueError('Invalid node id (node_from) of {}. Valid values are 0 to {}.'.format(node_from, self.__nodes - 1))
    if not 0 <= node_to < self.__nodes:
        raise ValueError('Invalid node id (node_to) of {}. Valid values are 0 to {}.'.format(node_to, self.__nodes - 1))
    if node_from == node_to:
        raise ValueError('The node_from ({}) can not be equal to the node_to ({}) (self-connections are forbidden in graph cuts).'.format(node_from, node_to))
    if weight_there <= 0 or weight_back <= 0:
        raise ValueError('Negative or zero weights are not allowed.')
    self.__graph.sum_edge(int(node_from), int(node_to), float(weight_there), float(weight_back))
|
def help(self, *args):
    """Can be overridden (and for example _Menu does)."""
    if not args:
        # plain "help" prints the full help text
        print(self.helpfull)
        return
    # this command takes no arguments; report the standard error message
    self.messages.error(self.messages.command_does_not_accept_arguments)
|
def _validate_header ( self , hed ) :
"""Validate the list that represents the table header .
: param hed : The list that represents the table header .
: type hed : list ( list ( hatemile . util . html . htmldomelement . HTMLDOMElement ) )
: return : True if the table header is valid or False if the table header
is not valid .
: rtype : bool"""
|
# pylint : disable = no - self - use
if not bool ( hed ) :
return False
length = - 1
for row in hed :
if not bool ( row ) :
return False
elif length == - 1 :
length = len ( row )
elif len ( row ) != length :
return False
return True
|
def _GetShowID(self, stringSearch, origStringSearch=None):
    """Search for given string as an existing entry in the database file name
    table or, if no match is found, as a show name from the TV guide.

    If an exact match is not found in the database the user can accept
    or decline the best match from the TV guide or can provide an alternate
    match to look up.

    Parameters
    ----------
    stringSearch : string
        String to look up in database or guide.
    origStringSearch : string [optional: default = None]
        Original search string, used by recursive function calls.

    Returns
    -------
    tvfile.ShowInfo or None
        If no show id could be found this returns None, otherwise
        it returns a tvfile.ShowInfo object containing show name
        and show id.
    """
    showInfo = tvfile.ShowInfo()
    if origStringSearch is None:
        # first (non-recursive) call: remember the original search string so
        # it can be added to the file name table once an ID is found
        goodlogging.Log.Info("RENAMER", "Looking up show ID for: {0}".format(stringSearch))
        origStringSearch = stringSearch
    goodlogging.Log.IncreaseIndent()
    showInfo.showID = self._db.SearchFileNameTable(stringSearch)
    if showInfo.showID is None:
        goodlogging.Log.Info("RENAMER", "No show ID match found for '{0}' in database".format(stringSearch))
        showNameList = self._guide.ShowNameLookUp(stringSearch)
        if self._skipUserInput is True:
            # unattended mode: accept a guide match only when unambiguous
            if len(showNameList) == 1:
                showName = showNameList[0]
                goodlogging.Log.Info("RENAMER", "Automatic selection of showname: {0}".format(showName))
            else:
                showName = None
                goodlogging.Log.Info("RENAMER", "Show skipped - could not make automatic selection of showname")
        else:
            showName = util.UserAcceptance(showNameList)
        if showName in showNameList:
            libEntry = self._db.SearchTVLibrary(showName=showName)
            if libEntry is None:
                # show not yet in the TV library: decide whether to add it
                if self._skipUserInput is True:
                    response = 'y'
                else:
                    goodlogging.Log.Info("RENAMER", "No show by this name found in TV library database. Is this a new show for the database?")
                    response = goodlogging.Log.Input("RENAMER", "Enter 'y' (yes), 'n' (no) or 'ls' (list existing shows): ")
                    response = util.ValidUserResponse(response, ('y', 'n', 'ls'))
                    if response.lower() == 'ls':
                        # list current library contents, then re-ask yes/no
                        dbLibList = self._db.SearchTVLibrary()
                        if dbLibList is None:
                            goodlogging.Log.Info("RENAMER", "TV library is empty")
                            response = 'y'
                        else:
                            dbShowNameList = [i[1] for i in dbLibList]
                            dbShowNameStr = ', '.join(dbShowNameList)
                            goodlogging.Log.Info("RENAMER", "Existing shows in database are: {0}".format(dbShowNameStr))
                            response = goodlogging.Log.Input("RENAMER", "Is this a new show? [y/n]: ")
                            response = util.ValidUserResponse(response, ('y', 'n'))
                if response.lower() == 'y':
                    showInfo.showID = self._db.AddShowToTVLibrary(showName)
                    showInfo.showName = showName
                else:
                    # not a new show: match the name against existing entries
                    try:
                        dbShowNameList  # may already be bound by the 'ls' branch above
                    except NameError:
                        dbLibList = self._db.SearchTVLibrary()
                        if dbLibList is None:
                            goodlogging.Log.Info("RENAMER", "No show ID found - TV library is empty")
                            return None
                        dbShowNameList = [i[1] for i in dbLibList]
                    # keep offering best matches until one is accepted or
                    # the user gives up
                    while showInfo.showID is None:
                        matchShowList = util.GetBestMatch(showName, dbShowNameList)
                        showName = util.UserAcceptance(matchShowList)
                        if showName is None:
                            goodlogging.Log.Info("RENAMER", "No show ID found - could not match to existing show")
                            return None
                        elif showName in matchShowList:
                            showInfo.showID = self._db.SearchTVLibrary(showName=showName)[0][0]
                            showInfo.showName = showName
            else:
                showInfo.showID = libEntry[0][0]
            # cache this lookup so future searches hit the database directly
            self._db.AddToFileNameTable(origStringSearch, showInfo.showID)
            goodlogging.Log.DecreaseIndent()
            return showInfo
        elif showName is None:
            goodlogging.Log.DecreaseIndent()
            return None
        else:
            # user supplied an alternate name: recurse with it, keeping the
            # original search string for the file name table entry
            goodlogging.Log.DecreaseIndent()
            return self._GetShowID(showName, origStringSearch)
    else:
        goodlogging.Log.Info("RENAMER", "Match found: show ID = {0}".format(showInfo.showID))
        if origStringSearch != stringSearch:
            self._db.AddToFileNameTable(origStringSearch, showInfo.showID)
        goodlogging.Log.DecreaseIndent()
        return showInfo
|
def get_sampleV(self, res, DV=None, resMode='abs', ind=None, Out='(X,Y,Z)'):
    """Sample, with resolution res, the volume defined by DV or ind.

    Delegates to ``_comp._Ves_get_sampleV`` with this vessel's polygon and
    its 2D bounding box, forwarding the sampling options unchanged.
    Returns the tuple ``(pts, dV, ind, reseff)`` produced by the helper.
    """
    geometry_args = (
        self.Poly,
        self.dgeom['P1Min'][0], self.dgeom['P1Max'][0],
        self.dgeom['P2Min'][1], self.dgeom['P2Max'][1],
        res,
    )
    options = {
        'DV': DV, 'dVMode': resMode, 'ind': ind,
        'VType': self.Id.Type, 'VLim': self.Lim,
        'Out': Out, 'margin': 1.e-9,
    }
    return _comp._Ves_get_sampleV(*geometry_args, **options)
|
def truncate_to(self, cert):
    """Remove all certificates in the path after the cert specified

    :param cert:
        An asn1crypto.x509.Certificate object to find

    :raises:
        LookupError - when the certificate could not be found

    :return:
        The current ValidationPath object, for chaining
    """
    # Locate the first entry whose issuer/serial matches the target cert.
    found_at = next(
        (position for position, entry in enumerate(self)
         if entry.issuer_serial == cert.issuer_serial),
        None,
    )
    if found_at is None:
        raise LookupError('Unable to find the certificate specified')
    # Drop everything after the matched certificate.
    while len(self) > found_at + 1:
        self.pop()
    return self
|
def check_user(self, user_id):
    """Check whether this user can read this dataset.

    A non-hidden dataset is readable by everyone; otherwise the user
    must be an owner with view permission ('Y').
    """
    if self.hidden == 'N':
        return True
    uid = int(user_id)
    # Readable only if some owner record matches the user and grants view.
    return any(
        int(owner.user_id) == uid and owner.view == 'Y'
        for owner in self.owners
    )
|
def export(self, class_name, method_name, export_data=False, export_dir='.', export_filename='data.json', export_append_checksum=False, **kwargs):
    """Port a trained estimator to the syntax of a chosen programming language.

    Parameters
    ----------
    :param class_name: string
        The name of the class in the returned result.
    :param method_name: string
        The name of the method in the returned result.
    :param export_data: bool, default: False
        Whether the model data should be saved or not.
    :param export_dir: string, default: '.' (current directory)
        The directory where the model data should be saved.
    :param export_filename: string, default: 'data.json'
        The filename of the exported model data.
    :param export_append_checksum: bool, default: False
        Whether to append the checksum to the filename or not.

    Returns
    -------
    :return: string
        The transpiled algorithm with the defined placeholders.
    """
    # Remember the requested target names.
    self.class_name = class_name
    self.method_name = method_name

    # Copy the fitted MLP's architecture onto this transpiler instance.
    est = self.estimator
    self.output_activation = est.out_activation_
    self.hidden_activation = est.activation
    self.n_layers = est.n_layers_
    self.n_hidden_layers = est.n_layers_ - 2
    self.n_inputs = len(est.coefs_[0])
    self.n_outputs = est.n_outputs_

    # Normalize hidden_layer_sizes to a plain list (an int means one layer).
    sizes = est.hidden_layer_sizes
    if isinstance(sizes, int):
        sizes = [sizes]
    self.hidden_layer_sizes = list(sizes)
    self.layer_units = [self.n_inputs] + self.hidden_layer_sizes + [est.n_outputs_]

    # Weights and biases:
    self.coefficients = est.coefs_
    self.intercepts = est.intercepts_

    # Binary or multiclass classifier?
    self.is_binary = self.n_outputs == 1
    self.prefix = 'binary' if self.is_binary else 'multi'

    if self.target_method == 'predict':
        # Exported variant: model data written to a separate JSON file.
        if export_data and os.path.isdir(export_dir):
            self.export_data(export_dir, export_filename, export_append_checksum)
            return self.predict('exported')
        # Separated variant: model data embedded in the transpiled code.
        return self.predict('separated')
|
def _authenticate():
    '''
    Retrieve CSRF and API tickets for the Proxmox API
    '''
    global url, port, ticket, csrf, verify_ssl
    url = config.get_cloud_config_value(
        'url', get_configured_provider(), __opts__, search_global=False
    )
    port = config.get_cloud_config_value(
        'port', get_configured_provider(), __opts__, default=8006,
        search_global=False
    )
    # FIX: a stray trailing comma previously made ``username`` a one-element
    # tuple instead of a string, which only worked by accident of form
    # encoding; it is now a plain string like the other config values.
    username = config.get_cloud_config_value(
        'user', get_configured_provider(), __opts__, search_global=False
    )
    passwd = config.get_cloud_config_value(
        'password', get_configured_provider(), __opts__, search_global=False
    )
    verify_ssl = config.get_cloud_config_value(
        'verify_ssl', get_configured_provider(), __opts__, default=True,
        search_global=False
    )
    connect_data = {'username': username, 'password': passwd}
    full_url = 'https://{0}:{1}/api2/json/access/ticket'.format(url, port)
    # POST credentials; the response carries the auth cookie and CSRF token
    # used by subsequent API calls (stored in module-level globals).
    returned_data = requests.post(full_url, verify=verify_ssl, data=connect_data).json()
    ticket = {'PVEAuthCookie': returned_data['data']['ticket']}
    csrf = six.text_type(returned_data['data']['CSRFPreventionToken'])
|
def move_images(self, image_directory):
    """Move PNG files one directory up: ``path/image/*.png`` -> ``path/*.png``.

    Recursively finds PNG files under ``image_directory``, moves those that
    live inside an ``image`` directory up one level, then removes the
    emptied ``image`` directories.

    :param image_directory: root directory to scan for ``image`` folders
    """
    image_paths = glob(image_directory + "/**/*.png", recursive=True)
    for image_path in image_paths:
        # FIX: the previous implementation replaced the literal substring
        # "\\image\\", which only matched Windows-style paths; on POSIX the
        # path was unchanged, the file was "moved" onto itself, and the
        # later removedirs() call failed on the still-populated directory.
        normalized = os.path.normpath(image_path)
        destination = normalized.replace(os.sep + "image" + os.sep, os.sep)
        if destination != normalized:
            shutil.move(normalized, destination)
    image_folders = glob(image_directory + "/**/image", recursive=True)
    for image_folder in image_folders:
        # removedirs also prunes parents, but stops at any non-empty one.
        os.removedirs(image_folder)
|
def GetMultipleTags(tag_list, start_time, end_time, sampling_rate=None, fill_limit=99999, verify_time=False, desc_as_label=False, utc=False):
    """Retrieves raw data from eDNA history for multiple tags, merging them into
    a single DataFrame, and resampling the data according to the specified
    sampling_rate.

    :param tag_list: a list of fully-qualified (site.service.tag) eDNA tags
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param sampling_rate: in units of seconds
    :param fill_limit: in units of data points
    :param verify_time: verify that the time is not before or after the query
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param utc: if True, use the integer time format instead of DateTime
    :return: a pandas DataFrame with timestamp and values (empty DataFrame
        if no tag returned any data)
    """
    # Since we are pulling data from multiple tags, iterate over each one.
    # Only the "raw" pull is used here, which obtains all data as it is
    # actually stored in the historian; tags with no data are skipped.
    dfs = []
    columns_names = []
    for tag in tag_list:
        df = GetHist(tag, start_time, end_time, utc=utc)
        if not df.empty:
            # Sometimes a duplicate index/value pair is retrieved from
            # eDNA, which will cause the concat to fail if not removed
            # df.drop_duplicates(inplace=True)
            df = df[~df.index.duplicated(keep='first')]
            # If the user wants to use descriptions as labels, we need to
            # ensure that only unique labels are used (duplicates get a
            # numeric suffix: "Desc", "Desc2", "Desc3", ...)
            label = tag
            if desc_as_label:
                orig_label = _GetLabel(tag)
                label = orig_label
                rename_number = 2
                while label in columns_names:
                    label = orig_label + str(rename_number)
                    rename_number += 1
            columns_names.append(label)
            df.rename(columns={tag: label}, inplace=True)
            # Add the DataFrame to the list, to be concatenated later
            dfs.append(pd.DataFrame(df[label]))
    # Next, we concatenate all the DataFrames using an outer join (default).
    # Verify integrity is slow, but it ensures that the concatenation
    # worked correctly.
    if dfs:
        merged_df = pd.concat(dfs, axis=1, verify_integrity=True)
        # Forward-fill the gaps created by the outer join, up to fill_limit
        # consecutive missing points per column.
        merged_df = merged_df.fillna(method="ffill", limit=fill_limit)
    else:
        warnings.warn('WARNING- No data retrieved for any tags. ' + 'Check eDNA connection, ensure that the start time is ' + 'not later than the end time, verify that the ' + 'DateTime formatting matches eDNA requirements, and ' + 'check that data exists in the query time period.')
        return pd.DataFrame()
    # eDNA sometimes pulls data too early or too far - let's filter out all
    # the data that is not within our original criteria.
    if verify_time:
        start_np = pd.to_datetime(start_time)
        end_np = pd.to_datetime(end_time)
        # NOTE(review): start bound is exclusive, end bound inclusive --
        # confirm this asymmetry is intended.
        mask = (merged_df.index > start_np) & (merged_df.index <= end_np)
        merged_df = merged_df.loc[mask]
    # Finally, we resample the data at the rate requested by the user
    # (seconds), forward-filling up to fill_limit points.
    if sampling_rate:
        sampling_string = str(sampling_rate) + "S"
        merged_df = merged_df.resample(sampling_string).fillna(method="ffill", limit=fill_limit)
    return merged_df
|
def GET_getitemtypes(self) -> None:
    """Get the types of all current exchange items supposed to return
    the values of |Parameter| or |Sequence| objects or the time series
    of |IOSequence| objects."""
    for exchange_item in state.getitems:
        item_type = self._get_itemtype(exchange_item)
        # Every name exposed by the item maps to the same type string.
        for item_name, _ in exchange_item.yield_name2value():
            self._outputs[item_name] = item_type
|
def phrase_strings(self, phrase_type):
    """Return strings corresponding to all phrases matching a given phrase type.

    :param phrase_type: POS such as "NP", "VP", "det", etc.
    :type phrase_type: str
    :return: a list of strings representing those phrases
    """
    phrases = []
    for subtree in self.subtrees_for_phrase(phrase_type):
        phrases.append(u" ".join(subtree.leaves()))
    return phrases
|
def _handle_create(self, response, ignore_tombstone, auto_refresh):
    '''
    Handles response from self.create()

    Args:
        response (requests.models.Response): response object from self.create()
        ignore_tombstone (bool): If True, will attempt creation, if tombstone
            exists (409), will delete tombstone and retry
        auto_refresh (bool or None): refresh the resource after creation; when
            None, fall back to the repository's default_auto_refresh setting

    Returns:
        self, when creation succeeded

    Raises:
        Exception: for 404/409/410/415 and any unrecognized status code
    '''
    # 201, success, refresh
    if response.status_code == 201:
        # if not specifying uri, capture from response and append to object
        self.uri = self.repo.parse_uri(response.text)
        # creation successful
        if auto_refresh:
            self.refresh()
        # FIX: compare to None with ``is`` instead of ``==`` so that falsy
        # values (e.g. False) are not conflated with "use the repo default"
        elif auto_refresh is None:
            if self.repo.default_auto_refresh:
                self.refresh()
        # fire resource._post_create hook if exists
        if hasattr(self, '_post_create'):
            self._post_create(auto_refresh=auto_refresh)
    # 404, assumed POST, target location does not exist
    elif response.status_code == 404:
        raise Exception('HTTP 404, for this POST request target location does not exist')
    # 409, conflict, resource likely exists
    elif response.status_code == 409:
        raise Exception('HTTP 409, resource already exists')
    # 410, tombstone present
    elif response.status_code == 410:
        if ignore_tombstone:
            response = self.repo.api.http_request('DELETE', '%s/fcr:tombstone' % self.uri)
            if response.status_code == 204:
                logger.debug('tombstone removed, retrying create')
                self.create()
            else:
                raise Exception('HTTP %s, Could not remove tombstone for %s' % (response.status_code, self.uri))
        else:
            raise Exception('tombstone for %s detected, aborting' % self.uri)
    # 415, unsupported media type
    elif response.status_code == 415:
        raise Exception('HTTP 415, unsupported media type')
    # unknown status code
    else:
        raise Exception('HTTP %s, unknown error creating resource' % response.status_code)
    # if all goes well, return self
    return self
|
def max_word_width(myDict):
    '''Return the length of the longest value in *myDict* (0 if empty).

    Example:
        currd = {0: 'AutoPauseSpeed', 125: 'HRLimitLow', 6: 'Activity'}
        max_word_width(currd)  # -> 14
    '''
    return max(map(len, myDict.values()), default=0)
|
def find_outer_region(im, r=0):
    r"""Finds regions of the image that are outside of the solid matrix.

    This function uses the rolling ball method to define where the outer region
    ends and the void space begins.

    This function is particularly useful for samples that do not fill the
    entire rectangular image, such as cylindrical cores or samples with non-
    parallel faces.

    Parameters
    ----------
    im : ND-array
        Image of the porous material with 1's for void and 0's for solid
    r : scalar
        The radius of the rolling ball to use.  If not specified then a value
        is calculated as twice maximum of the distance transform.  The image
        size is padded by this amount in all directions, so the image can
        become quite large and unwieldy if too large a value is given.

    Returns
    -------
    image : ND-array
        A boolean mask the same shape as ``im``, containing True in all voxels
        identified as *outside* the sample.
    """
    if r == 0:
        # Default ball radius: twice the largest distance-transform value,
        # i.e. comfortably larger than any internal pore.
        dt = spim.distance_transform_edt(input=im)
        r = int(sp.amax(dt)) * 2
    # Pad with void-valued (True) voxels so the ball can roll in from every side.
    im_padded = sp.pad(array=im, pad_width=r, mode='constant', constant_values=True)
    dt = spim.distance_transform_edt(input=im_padded)
    # Seeds: voxels at least r away from solid, plus the image border.
    seeds = (dt >= r) + get_border(shape=im_padded.shape)
    # Remove seeds not connected to edges
    labels = spim.label(seeds)[0]
    mask = labels == 1
    # Assume label of 1 on edges, assured by adding border
    # NOTE(review): this relies on scipy.ndimage.label assigning label 1 to
    # the border-connected component -- confirm for disconnected seed sets.
    dt = spim.distance_transform_edt(~mask)
    # Everything within r of the border-connected seed set is "outside".
    outer_region = dt < r
    # Trim the padding back off to match the input shape.
    outer_region = extract_subsection(im=outer_region, shape=im.shape)
    return outer_region
|
def wait_for_task(task_data, task_uri='/tasks'):
    """Run task and check the result.

    Args:
        task_data (str): the task json to execute
        task_uri (str): endpoint the task is posted to

    Returns:
        str: Task status.
    """
    taskid = post_task(task_data, task_uri)
    json_data = json.loads(task_data) if isinstance(task_data, str) else task_data
    # Inspect the task to see if a timeout is configured for this
    # environment/type combination; otherwise use the default.
    first_job = json_data['job'][0]
    env = first_job.get('credentials')
    task_type = first_job.get('type')
    timeout = TASK_TIMEOUTS.get(env, {}).get(task_type, DEFAULT_TASK_TIMEOUT)
    LOG.debug("Task %s will timeout after %s", task_type, timeout)
    return check_task(taskid, timeout)
|
def tt_comp(self, sampled_topics):
    """Compute term-topic matrix from sampled_topics.

    Returns a (V, K, samples) array with one term-topic slice per sample.
    """
    n_samples = sampled_topics.shape[0]
    tt = np.zeros((self.V, self.K, n_samples))
    for sample_idx in range(n_samples):
        tt[:, :, sample_idx] = samplers_lda.tt_comp(
            self.tokens, sampled_topics[sample_idx, :],
            self.N, self.V, self.K, self.beta)
    return tt
|
def unique_everseen(iterable, key=None):
    """Yield unique elements, preserving order.

        >>> list(unique_everseen('AAAABBBCCDAABBB'))
        ['A', 'B', 'C', 'D']
        >>> list(unique_everseen('ABBCcAD', str.lower))
        ['A', 'B', 'C', 'D']

    Sequences with a mix of hashable and unhashable items can be used.
    The function will be slower (i.e., `O(n^2)`) for unhashable items.

    Remember that ``list`` objects are unhashable - you can use the *key*
    parameter to transform the list to a tuple (which is hashable) to
    avoid a slowdown.

        >>> iterable = ([1, 2], [2, 3], [1, 2])
        >>> list(unique_everseen(iterable))  # Slow
        [[1, 2], [2, 3]]
        >>> list(unique_everseen(iterable, key=tuple))  # Faster
        [[1, 2], [2, 3]]

    Similarly, you may want to convert unhashable ``set`` objects with
    ``key=frozenset``.  For ``dict`` objects,
    ``key=lambda x: frozenset(x.items())`` can be used.
    """
    seen_hashable = set()
    seen_unhashable = []
    for item in iterable:
        marker = item if key is None else key(item)
        try:
            # Fast path: O(1) membership for hashable markers.
            is_new = marker not in seen_hashable
            if is_new:
                seen_hashable.add(marker)
        except TypeError:
            # Unhashable marker: fall back to a linear scan of a list.
            if marker not in seen_unhashable:
                seen_unhashable.append(marker)
                yield item
        else:
            if is_new:
                yield item
|
def get(self, name, handler, request=None):
    """Begin Fetch of current value of a PV

    :param name: A single name string or list of name strings
    :param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
    :param callable handler: Completion notification.  Called with a Value, RemoteError, or Cancelled
    :returns: A object with a method cancel() which may be used to abort the operation.
    """
    return _p4p.ClientOperation(
        self._channel(name),
        handler=unwrapHandler(handler, self._nt),
        pvRequest=wrapRequest(request),
        get=True,
        put=False,
    )
|
def register_i18n_js(self, *paths):
    """Register templates path translations files, like
    `select2/select2_locale_{lang}.js`.

    Only existing files are registered.
    """
    accepted_languages = self.config["BABEL_ACCEPT_LANGUAGES"]
    webassets = self.extensions["webassets"]
    for path_template in paths:
        for lang in accepted_languages:
            filename = path_template.format(lang=lang)
            try:
                webassets.resolver.search_for_source(webassets, filename)
            except IOError:
                # Translation file missing for this language -- skip it.
                # logger.debug('i18n JS not found, skipped: "%s"', filename)
                continue
            self.register_asset("js-i18n-" + lang, filename)
|
def resample_ann(resampled_t, ann_sample):
    """Compute the new annotation indices

    Parameters
    ----------
    resampled_t : numpy array
        Array of signal locations as returned by scipy.signal.resample
    ann_sample : numpy array
        Array of annotation locations

    Returns
    -------
    resampled_ann_sample : numpy array
        Array of resampled annotation locations
    """
    # tmp[j] counts how many annotations are snapped onto resampled index j.
    tmp = np.zeros(len(resampled_t), dtype='int16')
    j = 0
    tprec = resampled_t[j]
    for i, v in enumerate(ann_sample):
        # Slide a window [tprec, tnow] over resampled_t until it brackets v.
        while True:
            d = False
            # Annotation lies before the current window: step backwards.
            if v < tprec:
                j -= 1
                tprec = resampled_t[j]
            # Reached the last resampled position: assign annotation there.
            if j + 1 == len(resampled_t):
                tmp[j] += 1
                break
            tnow = resampled_t[j + 1]
            # v falls inside the window: snap it to the nearer endpoint.
            if tprec <= v and v <= tnow:
                if v - tprec < tnow - v:
                    tmp[j] += 1
                else:
                    tmp[j + 1] += 1
                d = True
            # Advance the window one step.
            j += 1
            tprec = tnow
            if d:
                break
    # Expand the per-position counts into one output index per annotation
    # (positions receiving several annotations are repeated).
    idx = np.where(tmp > 0)[0].astype('int64')
    res = []
    for i in idx:
        for j in range(tmp[i]):
            res.append(i)
    # Sanity check: every annotation must have been assigned exactly once.
    assert len(res) == len(ann_sample)
    return np.asarray(res, dtype='int64')
|
def new_result(self, data_mode='value', time_mode='framewise'):
    '''
    Create a new result

    Attributes:
        data_object : MetadataObject
        id_metadata : MetadataObject
        audio_metadata : MetadataObject
        frame_metadata : MetadataObject
        label_metadata : MetadataObject
        parameters : dict
    '''
    from datetime import datetime
    result = AnalyzerResult(data_mode=data_mode, time_mode=time_mode)
    # Automatically write known metadata
    result.id_metadata.date = datetime.now().replace(microsecond=0).isoformat(' ')
    result.id_metadata.version = timeside.core.__version__
    result.id_metadata.author = 'TimeSide'
    result.id_metadata.id = self.id()
    result.id_metadata.name = self.name()
    result.id_metadata.description = self.description()
    result.id_metadata.unit = self.unit()
    result.id_metadata.proc_uuid = self.uuid()
    # Query the media info once instead of once per field (the previous
    # implementation called self.mediainfo() five times in a row).
    media_info = self.mediainfo()
    result.audio_metadata.uri = media_info['uri']
    result.audio_metadata.sha1 = media_info['sha1']
    result.audio_metadata.start = media_info['start']
    result.audio_metadata.duration = media_info['duration']
    result.audio_metadata.is_segment = media_info['is_segment']
    result.audio_metadata.channels = self.channels()
    result.parameters = Parameters(self.get_parameters())
    if time_mode == 'framewise':
        result.data_object.frame_metadata.samplerate = self.result_samplerate
        result.data_object.frame_metadata.blocksize = self.result_blocksize
        result.data_object.frame_metadata.stepsize = self.result_stepsize
    return result
|
def macronize_text(self, text):
    """Return macronized form of text.

    E.g. "Gallia est omnis divisa in partes tres," ->
    "galliā est omnis dīvīsa in partēs trēs,"

    :param text: raw text
    :return: macronized text
    :rtype: str
    """
    # macronize_tags yields tuples whose third element is the macronized word.
    return " ".join(entry[2] for entry in self.macronize_tags(text))
|
def partition_chem_env(self, n_sphere=4, use_lookup=None):
    """This function partitions the molecule into subsets of the
    same chemical environment.

    A chemical environment is specified by the number of
    surrounding atoms of a certain kind around an atom with a
    certain atomic number represented by a tuple of a string
    and a frozenset of tuples.
    The ``n_sphere`` option determines how many branches the
    algorithm follows to determine the chemical environment.

    Example:
        A carbon atom in ethane has bonds with three hydrogen (atomic
        number 1) and one carbon atom (atomic number 6).
        If ``n_sphere=1`` these are the only atoms we are
        interested in and the chemical environment is::

            ('C', frozenset([('H', 3), ('C', 1)]))

        If ``n_sphere=2`` we follow every atom in the chemical
        enviromment of ``n_sphere=1`` to their direct neighbours.
        In the case of ethane this gives::

            ('C', frozenset([('H', 6), ('C', 1)]))

    Args:
        n_sphere (int):
        use_lookup (bool): Use a lookup variable for
            :meth:`~chemcoord.Cartesian.get_bonds`. The default is
            specified in ``settings['defaults']['use_lookup']``

    Returns:
        dict: A dictionary mapping from a chemical environment to
        the set of indices of atoms in this environment::

            {(element_symbol, frozenset([tuples])): set([indices])}
    """
    if use_lookup is None:
        use_lookup = settings['defaults']['use_lookup']

    def chem_env_of(index):
        # Collect all atoms within n_sphere bonds (excluding the atom itself)
        # and summarize them as (element, count) pairs.
        neighbour_idx = self.get_coordination_sphere(
            index, n_sphere=n_sphere, only_surface=False,
            give_only_index=True, use_lookup=use_lookup)
        neighbour_idx.remove(index)
        neighbour_atoms = self.loc[neighbour_idx, 'atom']
        counted = frozenset(collections.Counter(neighbour_atoms).most_common())
        return (self.loc[index, 'atom'], counted)

    partition = collections.defaultdict(set)
    for index in self.index:
        partition[chem_env_of(index)].add(index)
    return dict(partition)
|
def present(name, type, url, access=None, user=None, password=None, database=None, basic_auth=None, basic_auth_user=None, basic_auth_password=None, tls_auth=None, json_data=None, is_default=None, with_credentials=None, type_logo_url=None, orgname=None, profile='grafana'):
    '''
    Ensure that a data source is present.

    name
        Name of the data source.

    type
        Type of the datasource ('graphite', 'influxdb' etc.).

    access
        Use proxy or direct. Default: proxy

    url
        The URL to the data source API.

    user
        Optional - user to authenticate with the data source.

    password
        Optional - password to authenticate with the data source.

    database
        Optional - database to use with the data source.

    basic_auth
        Optional - set to True to use HTTP basic auth to authenticate with the
        data source.

    basic_auth_user
        Optional - HTTP basic auth username.

    basic_auth_password
        Optional - HTTP basic auth password.

    tls_auth
        Optional - set to True to use TLS client auth to authenticate with the
        data source.

    json_data
        Optional - additional json data to post (eg. "timeInterval").

    is_default
        Optional - set data source as default.

    with_credentials
        Optional - Whether credentials such as cookies or auth headers should
        be sent with cross-site requests.

    type_logo_url
        Optional - Logo to use for this datasource.

    orgname
        Name of the organization in which the data source should be present.

    profile
        Configuration profile used to connect to the Grafana instance.
        Default is 'grafana'.
    '''
    # A profile given by name refers to a config option holding the dict.
    if isinstance(profile, string_types):
        profile = __salt__['config.option'](profile)
    ret = {'name': name, 'result': None, 'comment': None, 'changes': {}}
    datasource = __salt__['grafana4.get_datasource'](name, orgname, profile)
    # Build the payload Grafana expects (camelCase keys), falling back to the
    # existing datasource values for arguments left as None.
    data = _get_json_data(name=name, type=type, url=url, access=access, user=user, password=password, database=database, basicAuth=basic_auth, basicAuthUser=basic_auth_user, basicAuthPassword=basic_auth_password, tlsAuth=tls_auth, jsonData=json_data, isDefault=is_default, withCredentials=with_credentials, typeLogoUrl=type_logo_url, defaults=datasource)
    # Datasource does not exist yet: create it (or report in test mode).
    if not datasource:
        if __opts__['test']:
            ret['comment'] = 'Datasource {0} will be created'.format(name)
            return ret
        __salt__['grafana4.create_datasource'](profile=profile, **data)
        datasource = __salt__['grafana4.get_datasource'](name, profile=profile)
        ret['result'] = True
        ret['comment'] = 'New data source {0} added'.format(name)
        ret['changes'] = data
        return ret
    # At this stage, the datasource exists; however, the object provided by
    # Grafana may lack some null keys compared to our "data" dict:
    for key in data:
        if key not in datasource:
            datasource[key] = None
    if data == datasource:
        ret['comment'] = 'Data source {0} already up-to-date'.format(name)
        return ret
    if __opts__['test']:
        ret['comment'] = 'Datasource {0} will be updated'.format(name)
        return ret
    __salt__['grafana4.update_datasource'](datasource['id'], profile=profile, **data)
    ret['result'] = True
    # Report only the keys that actually changed, ignoring server-managed ones.
    ret['changes'] = deep_diff(datasource, data, ignore=['id', 'orgId', 'readOnly'])
    ret['comment'] = 'Data source {0} updated'.format(name)
    return ret
|
def multi_interpolation_basis(n_objectives=6, n_interp_steps=5, width=128, channels=3):
    """A paramaterization for interpolating between each pair of N objectives.

    Sometimes you want to interpolate between optimizing a bunch of objectives,
    in a paramaterization that encourages images to align.

    Args:
        n_objectives: number of objectives you want interpolate between
        n_interp_steps: number of interpolation steps
        width: width of intepolated images
        channels: number of image channels

    Returns:
        A [n_objectives, n_objectives, n_interp_steps, width, width, channel]
        shaped tensor, t, where the final [width, width, channel] should be
        seen as images, such that the following properties hold:

            t[a, b] = t[b, a, ::-1]
            t[a, i, 0] = t[a, j, 0] for all i, j
            t[a, a, i] = t[a, a, j] for all i, j
            t[a, b, i] = t[b, a, -i] for all i
    """
    N, M, W, Ch = n_objectives, n_interp_steps, width, channels
    # Shared low-frequency background, broadcast over all (a, b, step) slots.
    const_term = sum([lowres_tensor([W, W, Ch], [W // k, W // k, Ch]) for k in [1, 2, 4, 8]])
    const_term = tf.reshape(const_term, [1, 1, 1, W, W, Ch])
    # One per-objective component; t[a, b] mixes objective a's component with
    # objective b's reversed component so endpoints align across pairs.
    example_interps = [sum([lowres_tensor([M, W, W, Ch], [2, W // k, W // k, Ch]) for k in [1, 2, 4, 8]]) for _ in range(N)]
    example_basis = []
    for n in range(N):
        col = []
        for m in range(N):
            interp = example_interps[n] + example_interps[m][::-1]
            col.append(interp)
        example_basis.append(col)
    interp_basis = []
    for n in range(N):
        # Lower triangle reuses the upper triangle reversed (symmetry t[a,b]=t[b,a,::-1]).
        col = [interp_basis[m][N - n][::-1] for m in range(n)]
        # FIX: use Ch rather than a hard-coded 3 so channels != 3 works;
        # the previous tf.zeros([M, W, W, 3]) broke stacking for other
        # channel counts.
        col.append(tf.zeros([M, W, W, Ch]))
        for m in range(n + 1, N):
            interp = sum([lowres_tensor([M, W, W, Ch], [M, W // k, W // k, Ch]) for k in [1, 2]])
            col.append(interp)
        interp_basis.append(col)
    basis = []
    for n in range(N):
        col_ex = tf.stack(example_basis[n])
        col_in = tf.stack(interp_basis[n])
        basis.append(col_ex + col_in)
    basis = tf.stack(basis)
    return basis + const_term
|
def validate(self, fixerrors=True):
    """Validates that the geometry is correctly formatted according to the geometry type.

    Parameters:

    - **fixerrors** (optional): Attempts to fix minor errors without raising exceptions (defaults to True)

    Returns:

    - True if the geometry is valid.

    Raises:

    - An Exception if not valid.
    """
    # validate nullgeometry or has type and coordinates keys
    if not self._data:
        # null geometry, no further checking needed
        return True
    elif "type" not in self._data or "coordinates" not in self._data:
        raise Exception("A geometry dictionary or instance must have the type and coordinates entries")
    # first validate geometry type, normalizing the capitalization if possible
    valid_types = ("Point", "MultiPoint", "LineString", "MultiLineString", "Polygon", "MultiPolygon")
    if self.type not in valid_types:
        if fixerrors:
            coretype = self.type.lower().replace("multi", "")
            canonical = {"point": "Point", "linestring": "LineString", "polygon": "Polygon"}.get(coretype)
            if canonical is None:
                raise Exception('Invalid geometry type. Must be one of: "Point","MultiPoint","LineString","MultiLineString","Polygon","MultiPolygon"')
            if self.type.lower().startswith("multi"):
                canonical = "Multi" + canonical
            self.type = canonical
        else:
            raise Exception('Invalid geometry type. Must be one of: "Point","MultiPoint","LineString","MultiLineString","Polygon","MultiPolygon"')
    # then validate coordinate data type
    coords = self._data["coordinates"]
    if not isinstance(coords, (list, tuple)):
        raise Exception("Coordinates must be a list or tuple type")
    # then validate coordinate structures per geometry type
    if self.type == "Point":
        if len(coords) != 2:
            raise Exception("Point must be one coordinate pair")
    elif self.type in ("MultiPoint", "LineString"):
        if not len(coords) > 1:
            raise Exception("MultiPoint and LineString must have more than one coordinates")
    elif self.type == "MultiLineString":
        for line in coords:
            if not len(line) > 1:
                raise Exception("All LineStrings in a MultiLineString must have more than one coordinate")
    elif self.type == "Polygon":
        for exterior_or_holes in coords:
            if not len(exterior_or_holes) >= 3:
                raise Exception("The exterior and all holes in a Polygon must have at least 3 coordinates")
    elif self.type == "MultiPolygon":
        for eachmulti in coords:
            for exterior_or_holes in eachmulti:
                if not len(exterior_or_holes) >= 3:
                    raise Exception("The exterior and all holes in all Polygons of a MultiPolygon must have at least 3 coordinates")
    # validation successful
    return True
|
def registrar_for_scope(cls, goal):
    """Returns a subclass of this registrar suitable for registering on the specified goal.

    Allows reuse of the same registrar for multiple goals, and also allows us to decouple task
    code from knowing which goal(s) the task is to be registered in.
    """
    subclass_name = '{}_{}'.format(cls.__name__, goal)
    if PY2:
        # type() requires a bytes name on Python 2.
        subclass_name = subclass_name.encode('utf-8')
    return type(subclass_name, (cls,), {'options_scope': goal})
|
def list(self, **params):
    """Retrieve all contacts

    Returns all contacts available to the user according to the parameters provided

    :calls: ``get /contacts``
    :param dict params: (optional) Search options.
    :return: List of dictionaries that support attriubte-style access, which represent collection of Contacts.
    :rtype: list
    """
    # The HTTP client returns (status, headers, body); only the body is needed.
    response = self.http_client.get("/contacts", params=params)
    return response[2]
|
def get_idxs(data, eid2idx):
    """Convert from event IDs to event indices.

    :param data: an array with a field eid
    :param eid2idx: a dictionary eid -> idx
    :returns: the array of event indices
    """
    # Translate only the unique IDs through the dict, then expand back to
    # the full array via the inverse mapping from numpy.unique.
    unique_eids, inverse = numpy.unique(data['eid'], return_inverse=True)
    unique_idxs = numpy.array([eid2idx[eid] for eid in unique_eids])
    return unique_idxs[inverse]
|
def match_url(self, request):
    """Match the request against a file in the adapter directory

    :param request: The request
    :type request: :class:`requests.Request`
    :return: Path to the file
    :rtype: ``str``
    """
    parsed_url = urlparse(request.path_url)
    relative_path = parsed_url.path[1:]
    encoded_query = quote('?' + parsed_url.query).lower()
    match = None
    for path in self.paths:
        # Candidate on-disk locations: bare path and path-with-query.
        candidate = os.path.join(BASE_PATH, path, relative_path).lower()
        candidate_with_query = candidate + encoded_query
        for item in self.index:
            if item[0] in (candidate, candidate_with_query):
                match = item[1]
                break
        # NOTE(review): a later entry in self.paths can overwrite an earlier
        # match (no outer break) -- behavior preserved from the original.
    return match
|
def _get_aria_autocomplete(self, field):
    """Returns the appropriate value for attribute aria-autocomplete of field.

    :param field: The field.
    :type field: hatemile.util.html.htmldomelement.HTMLDOMElement
    :return: The ARIA value of field ('both', 'list', 'none') or None when
        the field cannot take autocompletion.
    :rtype: str
    """
    tag_name = field.get_tag_name()
    input_type = None
    if field.has_attribute('type'):
        input_type = field.get_attribute('type').lower()
    # Only textareas and free-text-like inputs participate in
    # autocompletion; button-like, file, checkbox, radio and hidden
    # inputs are excluded.
    if ((tag_name == 'TEXTAREA') or ((tag_name == 'INPUT') and (not ((input_type == 'button') or (input_type == 'submit') or (input_type == 'reset') or (input_type == 'image') or (input_type == 'file') or (input_type == 'checkbox') or (input_type == 'radio') or (input_type == 'hidden'))))):
        value = None
        if field.has_attribute('autocomplete'):
            value = field.get_attribute('autocomplete').lower()
        else:
            # Fall back to the owning form's autocomplete setting, found
            # either as an ancestor or via the field's "form" attribute.
            form = self.parser.find(field).find_ancestors('form').first_result()
            if (form is None) and (field.has_attribute('form')):
                form = self.parser.find('#' + field.get_attribute('form')).first_result()
            if (form is not None) and (form.has_attribute('autocomplete')):
                value = form.get_attribute('autocomplete').lower()
        if value == 'on':
            return 'both'
        elif ((field.has_attribute('list')) and (self.parser.find('datalist[id="' + field.get_attribute('list') + '"]').first_result() is not None)):
            # A linked <datalist> implies list-based suggestions.
            return 'list'
        elif value == 'off':
            return 'none'
    return None
|
def _aggregate_on_chunks(x, f_agg, chunk_len):
    """Down-sample the time series x by applying the aggregation f_agg over
    consecutive chunks of length chunk_len.

    :param x: the time series to calculate the aggregation of
    :type x: numpy.ndarray
    :param f_agg: name of the aggregation method looked up on each chunk
        (expected to be a pandas.Series attribute, e.g. 'max')
    :type f_agg: str
    :param chunk_len: the size of the chunks where to aggregate the series
    :type chunk_len: int
    :return: a list of the aggregation function over the chunks
    :rtype: list
    """
    n_chunks = int(np.ceil(len(x) / chunk_len))
    aggregated = []
    for chunk_index in range(n_chunks):
        chunk = x[chunk_index * chunk_len:(chunk_index + 1) * chunk_len]
        # Look the aggregation up by name so any Series reducer works.
        aggregated.append(getattr(chunk, f_agg)())
    return aggregated
|
def badgify_badges(**kwargs):
    """Return all badges, or only the badges awarded to the given user.

    Accepts either a ``user`` or a ``username`` keyword argument; an
    unknown username is silently ignored and all badges are returned.
    """
    user_model = get_user_model()
    user = kwargs.get('user')
    username = kwargs.get('username')
    if username:
        try:
            user = user_model.objects.get(username=username)
        except user_model.DoesNotExist:
            # Unknown username: fall through to the "all badges" case.
            pass
    if not user:
        return Badge.objects.all()
    awards = Award.objects.filter(user=user).select_related('badge')
    return [award.badge for award in awards]
|
def room(self, name, participantIdentity=None, **kwargs):
    """Create a <Room> element and nest it under this element.

    :param name: Room name
    :param participantIdentity: Participant identity when connecting to the Room
    :param kwargs: additional attributes
    :returns: <Room> element
    """
    room_element = Room(name, participantIdentity=participantIdentity, **kwargs)
    return self.nest(room_element)
|
def msg(message):
    """Log a regular message.

    Writes to stdout always, and mirrors to the module logger when one
    has been configured.

    :param message: the message to be logged
    """
    to_stdout(" --- {message}".format(message=message))
    if _logger:
        _logger.info(message)
|
def remove(self, rev, permanent=False):
    """Remove a revision from this changelist.

    :param rev: Revision to remove
    :type rev: :class:`.Revision`
    :param permanent: When False the revision is moved back to the
        default changelist rather than dropped outright.
    :type permanent: bool
    :raises TypeError: if ``rev`` is not a :class:`.Revision`
    :raises ValueError: if ``rev`` is not in this changelist
    """
    if not isinstance(rev, Revision):
        raise TypeError('argument needs to be an instance of Revision')
    if rev not in self:
        raise ValueError('{} not in changelist'.format(rev))
    self._files.remove(rev)
    if permanent:
        return
    # Non-permanent removal re-parents the revision onto the default
    # changelist.
    rev.changelist = self._connection.default
|
def get_full_recirc_content(self, published=True):
    """Perform an elasticsearch query and return all matching content objects."""
    field_map = {
        "feature_type": "feature_type.slug",
        "tag": "tags.slug",
        "content-type": "_type",
    }
    query = self.get_query()
    return custom_search_model(Content, query, published=published, field_map=field_map)
|
def wrap_io_os_err(e):
    '''Formats IO and OS error messages for wrapping in FSQExceptions.

    Joins, when available: the exception's legacy ``message`` attribute
    (Python 2 only), ``strerror`` and ``filename``, e.g.
    ``"No such file or directory: foo.txt"``.

    :param e: an OSError/IOError/EnvironmentError instance
    :return: the formatted message string
    '''
    msg = ''
    if e.strerror:
        msg = e.strerror
    # OSError lost the .message attribute in Python 3; accessing it
    # directly raised AttributeError there. getattr keeps this helper
    # working on both Python 2 and 3.
    legacy_message = getattr(e, 'message', None)
    if legacy_message:
        msg = ' '.join([legacy_message, msg])
    if e.filename:
        msg = ': '.join([msg, e.filename])
    return msg
|
def extract_css_links(bs4):
    """Extract CSS links from a BeautifulSoup object.

    Combines anchors whose href ends in .css with the hrefs of
    <link type="text/css"> tags, de-duplicated.

    :param bs4: `BeautifulSoup`
    :return: `list` List of links
    """
    css_suffixes = ('.css', '.CSS')
    suffix_links = [link for link in extract_links(bs4) if link.endswith(css_suffixes)]
    tag_links = [
        tag['href']
        for tag in bs4.select('link[type="text/css"]')
        if tag.has_attr('href')
    ]
    return list(set(suffix_links + tag_links))
|
async def list_statuses(self, request):
    """Fetches the committed status of batches by either a POST or GET.

    Request:
        body: A JSON array of one or more id strings (if POST)
        query:
            - id: A comma separated list of up to 15 ids (if GET)
            - wait: Request should not return until all batches committed

    Response:
        data: A JSON object, with batch ids as keys, and statuses as values
        link: The /batch_statuses link queried (if GET)
    """
    error_traps = [error_handlers.StatusResponseMissing]
    # Parse batch ids from POST body, or query parameters
    if request.method == 'POST':
        if request.headers['Content-Type'] != 'application/json':
            LOGGER.debug('Request headers had wrong Content-Type: %s', request.headers['Content-Type'])
            raise errors.StatusWrongContentType()
        ids = await request.json()
        # The body must be a non-empty JSON array of strings.
        if (not ids or not isinstance(ids, list) or not all(isinstance(i, str) for i in ids)):
            LOGGER.debug('Request body was invalid: %s', ids)
            raise errors.StatusBodyInvalid()
        for i in ids:
            self._validate_id(i)
    else:
        ids = self._get_filter_ids(request)
        if not ids:
            LOGGER.debug('Request for statuses missing id query')
            raise errors.StatusIdQueryInvalid()
    # Query validator
    validator_query = client_batch_submit_pb2.ClientBatchStatusRequest(batch_ids=ids)
    self._set_wait(request, validator_query)
    response = await self._query_validator(Message.CLIENT_BATCH_STATUS_REQUEST, client_batch_submit_pb2.ClientBatchStatusResponse, validator_query, error_traps)
    # Send response; link/paging metadata only applies to GET queries.
    if request.method != 'POST':
        metadata = self._get_metadata(request, response)
    else:
        metadata = None
    data = self._drop_id_prefixes(self._drop_empty_props(response['batch_statuses']))
    return self._wrap_response(request, data=data, metadata=metadata)
|
def fft_coefficient(self, x, param=None):
    """Fourier coefficients of the one-dimensional discrete Fourier
    transform of x, as computed by tsfresh's ``fft_coefficient`` feature
    calculator.

    The coefficients are complex; each requested entry selects the real
    part ("real"), imaginary part ("imag"), absolute value ("abs") or
    angle in degrees ("angle").

    :param x: the time series to calculate the feature of
    :type x: pandas.Series
    :param param: dictionaries {"coeff": c, "attr": a} with c an int >= 0
        and a in ["real", "imag", "abs", "angle"]; a default selection is
        used when None
    :type param: list
    :return: the different feature values
    :rtype: list
    """
    if param is None:
        # Default coefficient/attribute selection.
        param = [
            {'attr': 'abs', 'coeff': 44},
            {'attr': 'abs', 'coeff': 63},
            {'attr': 'abs', 'coeff': 0},
            {'attr': 'real', 'coeff': 0},
            {'attr': 'real', 'coeff': 23},
        ]
    coefficients = feature_calculators.fft_coefficient(x, param)
    logging.debug("fft coefficient by tsfresh calculated")
    return list(coefficients)
|
def get_country_long(self, ip):
    '''Get country_long for ip; returns the falsy lookup result unchanged
    when the record is missing.'''
    record = self.get_all(ip)
    if not record:
        return record
    return record.country_long
|
def is_jid(jid):
    '''Returns True if the passed in value is a job id.

    A jid is a 20-digit timestamp string, optionally followed by an
    underscore and extra data (so total length 20, or > 21 with '_' at
    index 20).
    '''
    if not isinstance(jid, six.string_types):
        return False
    if len(jid) != 20 and (len(jid) <= 21 or jid[20] != '_'):
        return False
    # The first 20 characters must be all digits.
    try:
        int(jid[:20])
    except ValueError:
        return False
    return True
|
def event(self, event):
    """Allow GUI to be closed upon holding Shift.

    On a Close event: Shift forces the quit regardless of state;
    otherwise the close is only accepted when the controller is in a
    "ready" or "finished" state.
    """
    if event.type() == QtCore.QEvent.Close:
        modifiers = self.app.queryKeyboardModifiers()
        shift_pressed = QtCore.Qt.ShiftModifier & modifiers
        states = self.app.controller.states
        if shift_pressed:
            print("Force quitted..")
            self.app.controller.host.emit("pyblishQmlCloseForced")
            event.accept()
        elif any(state in states for state in ("ready", "finished")):
            # Idle: ask the host to shut down gracefully.
            self.app.controller.host.emit("pyblishQmlClose")
            event.accept()
        else:
            # Mid-publish: refuse the close unless the user forces it.
            print("Not ready, hold SHIFT to force an exit")
            event.ignore()
    # Always delegate to the base class afterwards.
    return super(Window, self).event(event)
|
def update(self):
    '''Refresh this object's data from the API.'''
    response = self._request(method='GET', url=self.API)
    self._json = response._json
|
def validate(self, input_parameters, context):
    """Run all registered type transformers/validators against the
    provided input parameters (mutating them in place) and return a dict
    mapping parameter names to error descriptions.
    """
    errors = {}
    for key, type_handler in self.input_transformations.items():
        if key not in input_parameters:
            continue
        if self.raise_on_invalid:
            # Let any handler exception propagate to the caller.
            input_parameters[key] = self.initialize_handler(type_handler, input_parameters[key], context=context)
            continue
        try:
            input_parameters[key] = self.initialize_handler(type_handler, input_parameters[key], context=context)
        except InvalidTypeData as error:
            errors[key] = error.reasons or str(error.message)
        except Exception as error:
            # Prefer the first exception arg as the error description.
            if getattr(error, 'args', None):
                errors[key] = error.args[0]
            else:
                errors[key] = str(error)
    for required_name in self.required:
        if required_name not in input_parameters:
            errors[required_name] = "Required parameter '{}' not supplied".format(required_name)
    # Only run the custom validate_function when the basics passed.
    if not errors and getattr(self, 'validate_function', False):
        errors = self.validate_function(input_parameters)
    return errors
|
def group_by_key_func(iterable, key_func):
    """Group elements of an iterable into a dict of lists keyed by
    ``key_func(element)``.

    >>> def si(d): return sorted(d.items())
    >>> si(group_by_key_func("a bb ccc d ee fff".split(), len))
    [(1, ['a', 'd']), (2, ['bb', 'ee']), (3, ['ccc', 'fff'])]
    >>> si(group_by_key_func([-1, 0, 1, 3, 6, 8, 9, 2], lambda x: x % 2))
    [(0, [0, 6, 8, 2]), (1, [-1, 1, 3, 9])]
    """
    grouped = defaultdict(list)
    for element in iterable:
        grouped[key_func(element)].append(element)
    return grouped
|
def read_config(*args):
    '''Read Traffic Server configuration variable definitions.

    .. versionadded:: 2016.11.0

    .. code-block:: bash

        salt '*' trafficserver.read_config proxy.config.http.keep_alive_post_out
    '''
    if _TRAFFICCTL:
        cmd = _traffic_ctl('config', 'get')
    else:
        cmd = _traffic_line('-r')
    ret = {}
    try:
        for arg in args:
            log.debug('Querying: %s', arg)
            ret[arg] = _subprocess(cmd + [arg])
    except KeyError:
        # Preserve whatever partial results we gathered before the failure.
        pass
    return ret
|
def end_at(self, document_fields):
    """End query results at a particular document value (inclusive).

    The result set will **include** the document specified by
    ``document_fields``. If the current query already has an end cursor
    -- set either via this method or
    :meth:`~.firestore_v1beta1.query.Query.end_before` -- it is
    overwritten. When sent to the server, ``document_fields`` are applied
    in the order given by
    :meth:`~.firestore_v1beta1.query.Query.order_by`.

    Args:
        document_fields (Union[~.firestore_v1beta1.document.DocumentSnapshot, dict, list, tuple]):
            a document snapshot or a dict/list/tuple of fields
            representing a query results cursor.

    Returns:
        ~.firestore_v1beta1.query.Query: A copy of this query with the
        newly added "end at" cursor.
    """
    # before=False makes the cursor inclusive; start=False marks it as an
    # end (not a start) cursor.
    query_with_cursor = self._cursor_helper(document_fields, before=False, start=False)
    return query_with_cursor
|
def _build(self, parent_cache):
    """Seed this cache from the parent cache.

    Copies over only the |MICE| that are unaffected by the subsystem cut.
    A |MICE| is affected if the cut either splits the mechanism or splits
    the connections between the purview and mechanism.
    """
    for key, mice in parent_cache.cache.items():
        if mice.damaged_by_cut(self.subsystem):
            continue
        self.cache[key] = mice
|
def login_user(server, login, password):
    """Log in to a Geonode server and return the authenticated session.

    :param server: The Geonode server URL.
    :type server: basestring
    :param login: The login to use on Geonode.
    :type login: basestring
    :param password: The password to use on Geonode.
    :type password: basestring
    :return: the authenticated web session
    :raises GeoNodeInstanceError: if the login page cannot be fetched
    :raises GeoNodeLoginError: if the credentials are rejected
    """
    login_url = urljoin(server, login_url_prefix)
    # Start the web session
    session = requests.session()
    result = session.get(login_url)
    # Check if the request ok
    if not result.ok:
        message = (tr('Request failed to {geonode_url}, got status code {status_code} ' 'and reason {request_reason}').format(geonode_url=server, status_code=result.status_code, request_reason=result.reason))
        raise GeoNodeInstanceError(message)
    # Take the CSRF token from the login form's hidden input
    login_form_regexp = ("<input type='hidden' name='csrfmiddlewaretoken' value='(.*)' />")
    expression_compiled = re.compile(login_form_regexp)
    match = expression_compiled.search(result.text)
    # NOTE(review): match is None if the login page markup ever changes,
    # which would raise AttributeError on the next line — confirm whether
    # that case needs explicit handling.
    csrf_token = match.groups()[0]
    payload = {'username': login, 'password': password, 'csrfmiddlewaretoken': csrf_token, }
    # Make the login
    result = session.post(login_url, data=payload, headers=dict(referer=login_url))
    # A redirect back to the login page means authentication failed.
    if result.url == login_url:
        message = tr('Failed to login to GeoNode at {geonode_url}').format(geonode_url=server)
        raise GeoNodeLoginError(message)
    return session
|
def match_arr_lengths(l):
    """Check that all nested array lengths match so a DataFrame can be
    created successfully.

    :param list l: Nested arrays
    :return bool: Valid or invalid
    """
    try:
        # Length of the first inner list is the basis for comparison.
        expected_len = len(l[0])
        for inner in l:
            if len(inner) != expected_len:
                return False
    except IndexError:
        # Couldn't get index 0: wrong data type given or not nested lists.
        print("Error: Array data is not formatted correctly.")
        return False
    except TypeError:
        # Non-iterable data type given.
        print("Error: Array data missing")
        return False
    # All inner lengths are equal.
    return True
|
def _is_en_passant_valid(self, opponent_pawn_location, position):
    """Return True when an opposing pawn that just moved two squares sits
    at ``opponent_pawn_location``, making en passant possible.

    :rtype: bool
    """
    try:
        piece = position.piece_at_square(opponent_pawn_location)
        return (
            piece is not None
            and isinstance(piece, Pawn)
            and piece.color != self.color
            and piece.just_moved_two_steps
        )
    except IndexError:
        # Location is off the board.
        return False
|
def namedb_namespace_insert(cur, input_namespace_rec):
    """Add a namespace to the database, if it doesn't exist already.

    It must be a *revealed* namespace, not a ready namespace (to mark a
    namespace as ready, use the namedb_apply_operation() method).

    :param cur: database cursor
    :param input_namespace_rec: namespace record (deep-copied; the caller's
        dict is not mutated)
    :return: True on success; aborts the process if the insert cannot be
        prepared
    """
    namespace_rec = copy.deepcopy(input_namespace_rec)
    namedb_namespace_fields_check(namespace_rec)
    try:
        query, values = namedb_insert_prepare(cur, namespace_rec, "namespaces")
    except Exception as e:
        # Fixed Python-2-only `except Exception, e` syntax, which is a
        # SyntaxError under Python 3; `as e` works on 2.6+ and 3.x.
        log.exception(e)
        log.error("FATAL: Failed to insert revealed namespace '%s'" % namespace_rec['namespace_id'])
        os.abort()
    namedb_query_execute(cur, query, values)
    return True
|
def build(ctx, max_revisions, targets, operators, archiver):
    """Build the wily cache.

    Any CLI option that was supplied overrides the corresponding field on
    the loaded config before the build command runs.
    """
    config = ctx.obj["CONFIG"]
    # Lazy import — presumably to defer loading the build machinery; note
    # that it shadows this CLI function's own name for the rest of the body.
    from wily.commands.build import build
    if max_revisions:
        logger.debug(f"Fixing revisions to {max_revisions}")
        config.max_revisions = max_revisions
    if operators:
        # Operators arrive as a comma-separated string.
        logger.debug(f"Fixing operators to {operators}")
        config.operators = operators.strip().split(",")
    if archiver:
        logger.debug(f"Fixing archiver to {archiver}")
        config.archiver = archiver
    if targets:
        logger.debug(f"Fixing targets to {targets}")
        config.targets = targets
    build(config=config, archiver=resolve_archiver(config.archiver), operators=resolve_operators(config.operators), )
    logger.info("Completed building wily history, run `wily report <file>` or `wily index` to see more.")
|
def list_items(queue):
    '''List contents of a queue.

    Returns the first column of each row produced by the internal
    ``_list_items`` helper.
    '''
    return [row[0] for row in _list_items(queue)]
|
def parse_fastp_log(self, f):
    """Parse the JSON output from fastp and save the summary statistics.

    Populates the per-sample dicts (fastp_data, duplication, insert-size
    and per-read quality/GC/N plot data) and registers the data source;
    returns None without storing anything when the JSON cannot be parsed.

    :param f: MultiQC file dict; keys used here: 'f' (file handle),
        'fn' (filename), 'root' (directory), 's_name' (sample name).
    """
    try:
        parsed_json = json.load(f['f'])
    except:
        log.warn("Could not parse fastp JSON: '{}'".format(f['fn']))
        return None
    # Fetch a sample name from the command (the argument after '-i')
    s_name = f['s_name']
    cmd = parsed_json['command'].split()
    for i, v in enumerate(cmd):
        if v == '-i':
            s_name = self.clean_s_name(cmd[i + 1], f['root'])
    if s_name == 'fastp':
        log.warn('Could not parse sample name from fastp command: {}'.format(f['fn']))
    self.add_data_source(f, s_name)
    # Initialise per-sample containers before parsing the sections below.
    self.fastp_data[s_name] = {}
    self.fastp_duplication_plotdata[s_name] = {}
    self.fastp_insert_size_data[s_name] = {}
    self.fastp_all_data[s_name] = parsed_json
    for k in ['read1_before_filtering', 'read2_before_filtering', 'read1_after_filtering', 'read2_after_filtering']:
        self.fastp_qual_plotdata[k][s_name] = {}
        self.fastp_gc_content_data[k][s_name] = {}
        self.fastp_n_content_data[k][s_name] = {}
    # Parse filtering_result
    try:
        for k in parsed_json['filtering_result']:
            self.fastp_data[s_name]['filtering_result_{}'.format(k)] = float(parsed_json['filtering_result'][k])
    except KeyError:
        log.debug("fastp JSON did not have 'filtering_result' key: '{}'".format(f['fn']))
    # Parse duplication
    try:
        self.fastp_data[s_name]['pct_duplication'] = float(parsed_json['duplication']['rate'] * 100.0)
    except KeyError:
        log.debug("fastp JSON did not have a 'duplication' key: '{}'".format(f['fn']))
    # Parse after_filtering
    try:
        for k in parsed_json['summary']['after_filtering']:
            self.fastp_data[s_name]['after_filtering_{}'.format(k)] = float(parsed_json['summary']['after_filtering'][k])
    except KeyError:
        log.debug("fastp JSON did not have a 'summary'-'after_filtering' keys: '{}'".format(f['fn']))
    # Parse data required to calculate Pct reads surviving
    try:
        self.fastp_data[s_name]['before_filtering_total_reads'] = float(parsed_json['summary']['before_filtering']['total_reads'])
    except KeyError:
        log.debug("Could not find pre-filtering # reads: '{}'".format(f['fn']))
    try:
        self.fastp_data[s_name]['pct_surviving'] = (self.fastp_data[s_name]['after_filtering_total_reads'] / self.fastp_data[s_name]['before_filtering_total_reads']) * 100.0
    except KeyError:
        log.debug("Could not calculate 'pct_surviving': {}".format(f['fn']))
    # Parse adapter_cutting
    try:
        for k in parsed_json['adapter_cutting']:
            try:
                self.fastp_data[s_name]['adapter_cutting_{}'.format(k)] = float(parsed_json['adapter_cutting'][k])
            except (ValueError, TypeError):
                # Not every adapter_cutting value is numeric; skip those.
                pass
    except KeyError:
        log.debug("fastp JSON did not have a 'adapter_cutting' key, skipping: '{}'".format(f['fn']))
    try:
        self.fastp_data[s_name]['pct_adapter'] = (self.fastp_data[s_name]['adapter_cutting_adapter_trimmed_reads'] / self.fastp_data[s_name]['before_filtering_total_reads']) * 100.0
    except KeyError:
        log.debug("Could not calculate 'pct_adapter': {}".format(f['fn']))
    # Duplication rate plot data
    try:
        # First count the total read count in the dup analysis
        total_reads = 0
        for v in parsed_json['duplication']['histogram']:
            total_reads += v
        if total_reads == 0:
            # Treat an all-zero histogram the same as a missing section.
            raise KeyError
        # Calculate percentages
        for i, v in enumerate(parsed_json['duplication']['histogram']):
            self.fastp_duplication_plotdata[s_name][i + 1] = (float(v) / float(total_reads)) * 100.0
    except KeyError:
        log.debug("No duplication rate plot data: {}".format(f['fn']))
    # Insert size plot data
    try:
        # First count the total read count in the insert size analysis,
        # tracking the last non-zero bin so trailing zeros are trimmed.
        total_reads = 0
        max_i = 0
        for i, v in enumerate(parsed_json['insert_size']['histogram']):
            total_reads += v
            if float(v) > 0:
                max_i = i
        if total_reads == 0:
            raise KeyError
        # Calculate percentages
        for i, v in enumerate(parsed_json['insert_size']['histogram']):
            if i <= max_i:
                self.fastp_insert_size_data[s_name][i + 1] = (float(v) / float(total_reads)) * 100.0
    except KeyError:
        log.debug("No insert size plot data: {}".format(f['fn']))
    for k in ['read1_before_filtering', 'read2_before_filtering', 'read1_after_filtering', 'read2_after_filtering']:
        # Read quality data
        try:
            for i, v in enumerate(parsed_json[k]['quality_curves']['mean']):
                self.fastp_qual_plotdata[k][s_name][i + 1] = float(v)
        except KeyError:
            log.debug("Read quality {} not found: {}".format(k, f['fn']))
        # GC and N content plots
        try:
            for i, v in enumerate(parsed_json[k]['content_curves']['GC']):
                self.fastp_gc_content_data[k][s_name][i + 1] = float(v) * 100.0
            for i, v in enumerate(parsed_json[k]['content_curves']['N']):
                self.fastp_n_content_data[k][s_name][i + 1] = float(v) * 100.0
        except KeyError:
            log.debug("Content curve data {} not found: {}".format(k, f['fn']))
    # Remove empty dicts so samples with no usable data don't appear
    if len(self.fastp_data[s_name]) == 0:
        del self.fastp_data[s_name]
    if len(self.fastp_duplication_plotdata[s_name]) == 0:
        del self.fastp_duplication_plotdata[s_name]
    if len(self.fastp_insert_size_data[s_name]) == 0:
        del self.fastp_insert_size_data[s_name]
    if len(self.fastp_all_data[s_name]) == 0:
        del self.fastp_all_data[s_name]
|
def transpose(self, *axes):
    """Permute the dimensions of a Timeseries.

    Returns self unchanged for 0/1-D data; otherwise returns either a
    Timeseries (with permuted labels) or a plain ndarray — see the review
    note below about which branch does which.
    """
    if self.ndim <= 1:
        # Nothing to transpose for scalar/1-D data.
        return self
    ar = np.asarray(self).transpose(*axes)
    if axes[0] != 0:
        # then axis 0 is unaffected by the transposition
        # NOTE(review): the condition looks inverted relative to the
        # comment above — axes[0] != 0 means axis 0 *was* moved, yet this
        # branch keeps self.tspan (the time axis). Confirm the intended
        # branch against the Timeseries class before relying on it.
        newlabels = [self.labels[ax] for ax in axes]
        return Timeseries(ar, self.tspan, newlabels)
    else:
        return ar
|
def init_app(self, app, **kwargs):
    """Initialize application object with the theme extension.

    Wires up the menu and breadcrumbs extensions, registers the theme
    blueprint (templates/static), optional frontpage blueprint, the
    breadcrumb root, and HTTP error handlers.

    :param app: An instance of :class:`~flask.Flask`.
    """
    self.init_config(app)
    # Initialize extensions
    self.menu_ext.init_app(app)
    self.menu = app.extensions['menu']
    self.breadcrumbs.init_app(app)
    # Register blueprint in order to register template and static folder.
    app.register_blueprint(Blueprint('invenio_theme', __name__, template_folder='templates', static_folder='static', ))
    # Register frontpage blueprint if enabled.
    if app.config['THEME_FRONTPAGE']:
        app.register_blueprint(blueprint)
    # Initialize breadcrumbs.
    item = self.menu.submenu('breadcrumbs')
    item.register(app.config['THEME_BREADCRUMB_ROOT_ENDPOINT'], _('Home'))
    # Register errors handlers.
    app.register_error_handler(401, unauthorized)
    app.register_error_handler(403, insufficient_permissions)
    app.register_error_handler(404, page_not_found)
    app.register_error_handler(500, internal_error)
    # Save reference to self on object so other code can find the extension.
    app.extensions['invenio-theme'] = self
|
def write_ndef(self, ndef, slot=1):
    """Write an NDEF tag configuration to the YubiKey NEO.

    :raises YubiKeyVersionError: when the key lacks NFC NDEF support for
        the requested slot.
    """
    if not self.capabilities.have_nfc_ndef(slot):
        raise yubikey_base.YubiKeyVersionError("NDEF slot %i unsupported in %s" % (slot, self))
    target_slot = _NDEF_SLOTS[slot]
    return self._device._write_config(ndef, target_slot)
|
def pdb_downloader_and_metadata(self, outdir=None, pdb_file_type=None, force_rerun=False):
    """Download ALL mapped experimental structures to the protein structures directory.

    Args:
        outdir (str): Path to output directory, if protein structures
            directory not set or other output directory is desired
        pdb_file_type (str): Type of PDB file to download, if not already
            set or other format is desired
        force_rerun (bool): If files should be re-downloaded if they
            already exist

    Returns:
        list: List of PDB IDs that were downloaded (empty when there are
        no experimental structures).

    Raises:
        ValueError: if no output directory is set or provided.

    Todo:
        * Parse mmtf or PDB file for header information, rather than
          always getting the cif file for header info
    """
    if not outdir:
        outdir = self.structure_dir
        if not outdir:
            raise ValueError('Output directory must be specified')
    if not pdb_file_type:
        pdb_file_type = self.pdb_file_type
    # Check if we have any PDBs. Previously this path returned None,
    # contradicting the documented list return type; return [] so callers
    # can iterate the result unconditionally.
    if self.num_structures_experimental == 0:
        log.debug('{}: no structures available - nothing will be downloaded'.format(self.id))
        return []
    downloaded_pdb_ids = []
    # Download the PDBs
    for s in self.get_experimental_structures():
        log.debug('{}: downloading structure file from the PDB...'.format(s.id))
        s.download_structure_file(outdir=outdir, file_type=pdb_file_type, force_rerun=force_rerun, load_header_metadata=True)
        downloaded_pdb_ids.append(s.id)
    return downloaded_pdb_ids
|
def _escape(self, bits):
    """Escape trailing braces in parsed format bits.

    value: 'foobar{' -> 'foobar{{'
    value: 'x}' -> 'x}}'
    """
    for value, field_name, format_spec, conversion in bits:
        if value:
            last = value[-1]
            if last in (u'{', u'}'):
                # Double the trailing brace so str.format treats it
                # literally instead of as a placeholder delimiter.
                value += last
        yield value, field_name, format_spec, conversion
|
def setup_logging(app, disable_existing_loggers=True):
    """Setup the logging using logging.yaml.

    :param app: The app which setups the logging. Used for the log's
        filename and for the log's name (replaces the '__name__'
        placeholder entry in the YAML config).
    :type app: str
    :param disable_existing_loggers: If False, loggers which exist when
        this call is made are left enabled.
    :type disable_existing_loggers: bool
    :returns: None
    """
    config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'logging.yaml')
    # Use a context manager so the file handle is closed (it previously
    # leaked), and safe_load because yaml.load without an explicit Loader
    # is deprecated and can execute arbitrary YAML tags.
    with open(config_path, 'r') as config_file:
        conf = yaml.safe_load(config_file)
    conf['disable_existing_loggers'] = disable_existing_loggers
    conf['loggers'][app] = conf['loggers'].pop('__name__')
    logging.config.dictConfig(conf)
|
def removeRnaQuantificationSet(self):
    """Remove an rnaQuantificationSet from this repo, after confirmation."""
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    rna_quant_set = dataset.getRnaQuantificationSetByName(self._args.rnaQuantificationSetName)

    def do_remove():
        # Deferred so the deletion only happens once confirmed.
        self._updateRepo(self._repo.removeRnaQuantificationSet, rna_quant_set)

    self._confirmDelete("RnaQuantificationSet", rna_quant_set.getLocalId(), do_remove)
|
def procrustes(a, b, reflection=True, translation=True, scale=True, return_cost=True):
    """Perform Procrustes' analysis subject to constraints.

    Finds the transformation T mapping a to b which minimizes the square
    sum distances between Ta and b, also called the cost.

    Parameters
    ----------
    a : (n, 3) float
        List of points in space
    b : (n, 3) float
        List of points in space
    reflection : bool
        If the transformation is allowed reflections
    translation : bool
        If the transformation is allowed translations
    scale : bool
        If the transformation is allowed scaling
    return_cost : bool
        Whether to return the cost and transformed a as well

    Returns
    -------
    matrix : (4, 4) float
        The transformation matrix sending a to b
    transformed : (n, 3) float
        The image of a under the transformation (only when return_cost)
    cost : float
        The cost of the transformation (only when return_cost)
    """
    a = np.asanyarray(a, dtype=np.float64)
    b = np.asanyarray(b, dtype=np.float64)
    if not util.is_shape(a, (-1, 3)) or not util.is_shape(b, (-1, 3)):
        raise ValueError('points must be (n,3)!')
    if len(a) != len(b):
        raise ValueError('a and b must contain same number of points!')
    # Remove translation component (centroids; zeros when translation is
    # not allowed, so the later arithmetic is a no-op)
    if translation:
        acenter = a.mean(axis=0)
        bcenter = b.mean(axis=0)
    else:
        acenter = np.zeros(a.shape[1])
        bcenter = np.zeros(b.shape[1])
    # Remove scale component (RMS distance from the centroid; 1 disables)
    if scale:
        ascale = np.sqrt(((a - acenter) ** 2).sum() / len(a))
        bscale = np.sqrt(((b - bcenter) ** 2).sum() / len(b))
    else:
        ascale = 1
        bscale = 1
    # Use SVD to find optimal orthogonal matrix R
    # constrained to det(R) = 1 if necessary.
    u, s, vh = np.linalg.svd(np.dot(((b - bcenter) / bscale).T, ((a - acenter) / ascale)))
    if reflection:
        R = np.dot(u, vh)
    else:
        # Flip the sign of the last singular direction so det(R) == +1,
        # excluding reflections from the solution.
        R = np.dot(np.dot(u, np.diag([1, 1, np.linalg.det(np.dot(u, vh))])), vh)
    # Compute our 4D transformation matrix encoding
    # a -> (R @ (a - acenter) / ascale) * bscale + bcenter
    #    = (bscale / ascale) R @ a + (bcenter - (bscale / ascale) R @ acenter)
    translation = bcenter - (bscale / ascale) * np.dot(R, acenter)
    matrix = np.hstack((bscale / ascale * R, translation.reshape(-1, 1)))
    matrix = np.vstack((matrix, np.array([0.] * (a.shape[1]) + [1.]).reshape(1, -1)))
    if return_cost:
        transformed = transform_points(a, matrix)
        cost = ((b - transformed) ** 2).mean()
        return matrix, transformed, cost
    else:
        return matrix
|
def fetch_starting_at_coord(self, coord):
    """Return a new BAMFile positioned at the given (blockStart, innerStart)
    coordinate.

    .. warning:: creates a new instance of a BAMFile object when maybe the
       one we had would have worked
    """
    options = BAMFile.Options(blockStart=coord[0], innerStart=coord[1], reference=self.reference)
    return BAMFile(self.path, options)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.