signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def remove_key(self, key):
    """Remove an own key/value pair.

    When the parent scope still defines ``key``, the container entry is
    restored from the parent; otherwise it is deleted outright.  A key
    that is missing everywhere is silently ignored.
    """
    try:
        self._own_keys.discard(key)
        parent = self._parent
        if parent and parent.has_key(key):
            # Shadowed value removed: fall back to the parent's value.
            self._container[key] = parent[key]
        else:
            del self._container[key]
    except KeyError:
        pass
def filter_data(data, kernel, mode='constant', fill_value=0.0, check_normalization=False):
    """Convolve a 2D image with a 2D kernel.

    The kernel may either be a 2D `~numpy.ndarray` or a
    `~astropy.convolution.Kernel2D` object.

    Parameters
    ----------
    data : array_like
        The 2D array of the image.
    kernel : array-like (2D) or `~astropy.convolution.Kernel2D`
        The 2D kernel used to filter the input ``data``.  Filtering the
        ``data`` will smooth the noise and maximize detectability of
        objects with a shape similar to the kernel.
    mode : {'constant', 'reflect', 'nearest', 'mirror', 'wrap'}, optional
        Determines how the array borders are handled.  For ``'constant'``
        mode, values outside the borders are set to ``fill_value``.
        The default is ``'constant'``.
    fill_value : scalar, optional
        Fill value beyond the array borders when ``mode`` is
        ``'constant'``.  The default is ``0.0``.
    check_normalization : bool, optional
        If `True`, a warning is issued when the kernel does not sum to 1.

    Returns
    -------
    The filtered (float) image, or ``data`` unchanged when ``kernel`` is None.
    """
    from scipy import ndimage

    if kernel is None:
        return data

    kernel_array = kernel.array if isinstance(kernel, Kernel2D) else kernel
    if check_normalization and not np.allclose(np.sum(kernel_array), 1.0):
        warnings.warn('The kernel is not normalized.', AstropyUserWarning)
    # NOTE: astropy.convolution.convolve fails with zero-sum kernels
    # (used in findstars) (cf. astropy #1647), hence ndimage.convolve.
    # ndimage.convolve returns an int image for int data with a float
    # kernel, so cast the data to float to always get a float image.
    return ndimage.convolve(data.astype(float), kernel_array,
                            mode=mode, cval=fill_value)
def shared_variantcall(call_fn, name, align_bams, ref_file, items, assoc_files, region=None, out_file=None):
    """Provide base functionality for prepping and indexing for variant calling.

    Derives an output VCF path when none is given, writes an empty VCF when
    the target region file is missing, otherwise runs ``call_fn`` inside a
    file transaction and bgzips/indexes the result.

    :param call_fn: callable(align_bams, ref_file, items, target_regions, tx_out_file)
    :param name: caller name, used for logging only
    :param region: optional region restriction for the call
    :param out_file: optional explicit output path
    :return: path to the (possibly bgzipped and indexed) output VCF
    """
    config = items[0]["config"]
    if out_file is None:
        if vcfutils.is_paired_analysis(align_bams, items):
            # BUG FIX: the metadata dictionary key was misspelled "metdata",
            # which would raise KeyError for every paired analysis.
            out_file = "%s-paired-variants.vcf.gz" % config["metadata"]["batch"]
        else:
            out_file = "%s-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
    if not file_exists(out_file):
        logger.debug("Genotyping with {name}: {region} {fname}".format(
            name=name, region=region, fname=os.path.basename(align_bams[0])))
        variant_regions = bedutils.population_variant_regions(items, merged=True)
        target_regions = subset_variant_regions(variant_regions, region, out_file, items=items)
        if (variant_regions is not None and isinstance(target_regions, six.string_types)
                and not os.path.isfile(target_regions)):
            # No usable BED of target regions: emit an empty VCF instead.
            vcfutils.write_empty_vcf(out_file, config)
        else:
            with file_transaction(config, out_file) as tx_out_file:
                call_fn(align_bams, ref_file, items, target_regions, tx_out_file)
    if out_file.endswith(".gz"):
        out_file = vcfutils.bgzip_and_index(out_file, config)
    return out_file
def metaseries_description_metadata(description):
    """Return metadata from a MetaSeries image description as a dict.

    :param description: XML string starting with ``<MetaData>``.
    :raises ValueError: when ``description`` is not a MetaSeries blob.
    """
    if not description.startswith('<MetaData>'):
        raise ValueError('invalid MetaSeries image description')

    # FIX: xml.etree.cElementTree was deprecated since Python 3.3 and
    # removed in 3.9; ElementTree uses the C accelerator automatically.
    from xml.etree import ElementTree as etree  # delayed import

    root = etree.fromstring(description)
    types = {'float': float,
             'int': int,
             'bool': lambda x: asbool(x, 'on', 'off')}

    def parse(node, result):
        # Recursively collect 'id'/'type'/'value' attributes into result.
        for child in node:
            attrib = child.attrib
            if not attrib:
                result[child.tag] = parse(child, {})
                continue
            if 'id' in attrib:
                key = attrib['id']
                kind = attrib['type']
                value = attrib['value']
                result[key] = types[kind](value) if kind in types else value
        return result

    adict = parse(root, {})
    if 'Description' in adict:
        # NOTE(review): the separator being replaced looks like it may have
        # been a mangled '&#13;&#10;' in the original source — confirm.
        adict['Description'] = adict['Description'].replace(' ', '\n')
    return adict
def _build(self, leaves):
    """Create the next aggregation level and put all references in place.

    Pairs adjacent leaves into parent nodes, wiring parent ('p'),
    sibling ('sib') and side ('L'/'R') references.  With an odd number
    of leaves, the trailing leaf is promoted unchanged to the next level.
    """
    new_level = []
    # Promote an odd trailing leaf to the next level.
    leftover = leaves.pop(-1) if len(leaves) % 2 == 1 else None
    for i in range(0, len(leaves), 2):
        left, right = leaves[i], leaves[i + 1]
        parent = Node(left.val + right.val)
        parent.l, parent.r = left, right
        left.side, right.side = 'L', 'R'
        left.p = right.p = parent
        left.sib, right.sib = right, left
        new_level.append(parent)
    if leftover:
        new_level.append(leftover)
    return new_level
def fit(self, X, y, **kwargs):
    """Fit the wrapped estimator, then draw the alpha-error plot.

    A simple pass-through: calls ``fit`` on the estimator and then
    ``draw``.  Returns ``self`` to allow chaining.
    """
    self.estimator.fit(X, y, **kwargs)
    self.draw()
    return self
def running_coordination_number(coordinates_a, coordinates_b, periodic, binsize=0.002, cutoff=1.5):
    """Return the cumulative radial distribution function.

    Also called the running coordination number: the un-normalized RDF
    histogram is divided by the number of reference particles and then
    accumulated over the radial bins.
    """
    x, y = rdf(coordinates_a, coordinates_b, periodic=periodic,
               normalize=False, binsize=binsize, cutoff=cutoff)
    per_particle = y.astype('float32') / len(coordinates_a)
    return x, np.cumsum(per_particle)
async def fetchone(self):
    """Fetch one row, just like DB-API ``cursor.fetchone()``.

    While rows remain the cursor stays open; once exhausted the cursor
    is closed automatically and None is returned.
    """
    try:
        row = await self._cursor.fetchone()
    except AttributeError:
        # No underlying cursor: delegate to the standard error path.
        self._non_result()
    else:
        if row is None:
            await self.close()
            return None
        return self._process_rows([row])[0]
def get_full_path(request):
    """Return the current relative path including the query string.

    E.g. "/foo/bar/?page=1".
    """
    path = request.fullpath
    query_string = request.environ.get('QUERY_STRING')
    if not query_string:
        return path
    return path + '?' + to_native(query_string)
def csrf_token():
    """Return a user-specific CSRF token string.

    A random 128-byte secret is stored in the session on first use; the
    token is the hex HMAC-SHA1 of that secret under the app secret key.
    """
    if "_csrf_token" not in session:
        session["_csrf_token"] = os.urandom(128)
    digest = hmac.new(app.secret_key, session["_csrf_token"], digestmod=sha1)
    return digest.hexdigest()
def p_Exception(p):
    """Exception : exception IDENTIFIER Inheritance "{" ExceptionMembers "}" ";" """
    # PLY grammar action: the docstring IS the grammar rule, so its exact
    # text is behavior-bearing (the scraped copy had spaces inserted inside
    # the quoted literal tokens, which would break the parser).
    # Build a model.Exception node from the matched name, inheritance
    # clause and member list.
    p[0] = model.Exception(name=p[2], parent=p[3], members=p[5])
def AddMembers(self, *Members):
    """Adds new members to the chat.

    :Parameters:
      Members : `User`
        One or more users to add.
    """
    handles = ', '.join(user.Handle for user in Members)
    self._Alter('ADDMEMBERS', handles)
def getPassage(self, urn, inventory=None, context=None):
    """Retrieve a passage.

    :param urn: URN identifying the text's passage (minimum depth: 1)
    :type urn: text
    :param inventory: Name of the inventory
    :type inventory: text
    :param context: Number of citation units at the same level of the
        citation hierarchy as the requested urn, immediately preceding
        and following it, to include in the reply
    :type context: int
    :rtype: str
    """
    request = {
        "inv": inventory,
        "urn": urn,
        "context": context,
        "request": "GetPassage",
    }
    return self.call(request)
def remove_colors():
    """Blank out all configured colors so output has no escape sequences."""
    colors = CONF["COLORS"]
    for key in colors:
        value = colors[key]
        if isinstance(value, dict):
            # Nested color group: clear each entry in place.
            for sub_key in value:
                value[sub_key] = ""
        else:
            colors[key] = ""
def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', full_return=False, kwarg=None, **kwargs):
    '''Synchronously execute a command on targeted minions.

    Publishes ``fun`` (with ``arg``/``kwarg``) to the minions matched by
    ``tgt`` (interpreted according to ``tgt_type``), waits up to the
    timeout period for all minions to reply, then returns all minion
    data at once.

    .. code-block:: python

        >>> import salt.client
        >>> local = salt.client.LocalClient()
        >>> local.cmd('*', 'cmd.run', ['whoami'])
        {'jerry': 'root'}

    :param tgt: Which minions to target. Default is shell glob; modified
        by ``tgt_type`` (``glob``, ``pcre``, ``list``, ``grain``,
        ``grain_pcre``, ``pillar``, ``pillar_pcre``, ``nodegroup``,
        ``range``, ``compound``, ``ipcidr``).
    :type tgt: string or list
    :param fun: ``module.function`` to call (e.g. ``test.ping``).  May be
        a list of functions for a compound command, in which case ``arg``
        must be a list of argument lists correlated by index (use an
        empty list for functions that take no arguments).
    :type fun: string or list of strings
    :param arg: Arguments to pass to the remote function.
    :type arg: list or list-of-lists
    :param timeout: Seconds to wait after the last minion returns but
        before all minions return.
    :param ret: Returner to use; may be a comma-delimited list of
        returners to call in order on the minions.
    :param jid: Optional pre-assigned job id.
    :param full_return: Output the job return only (default) or the full
        return including exit code and other job metadata.
    :param kwarg: A dictionary with keyword arguments for the function.
    :param kwargs: Optional keyword arguments, e.g. external-auth
        credentials (``username``/``password``/``eauth`` or ``token``).
    :returns: A dictionary with the result of the execution, keyed by
        minion ID; minions that did not reply map to False.  A compound
        command returns a sub-dictionary keyed by function name.
    '''
    was_listening = self.event.cpub
    try:
        pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout, jid,
                                kwarg=kwarg, listen=True, **kwargs)
        if not pub_data:
            return pub_data
        results = {}
        for fn_ret in self.get_cli_event_returns(pub_data['jid'],
                                                 pub_data['minions'],
                                                 self._get_timeout(timeout),
                                                 tgt, tgt_type, **kwargs):
            if not fn_ret:
                continue
            for minion_id, data in six.iteritems(fn_ret):
                results[minion_id] = data if full_return else data.get('ret', {})
        # Minions that never replied are reported as False.
        for missing in set(pub_data['minions']) - set(results):
            results[missing] = False
        return results
    finally:
        # Only close the event publisher if this call opened it.
        if not was_listening:
            self.event.close_pub()
def verbosedump(value, fn, compress=None):
    """Verbose wrapper around dump: announce the save, then delegate."""
    print('Saving "%s"... (%s)' % (fn, type(value)))
    dump(value, fn, compress=compress)
def walk(self):
    """Walk the directory like ``os.walk``.

    Yields (dirpath, dirnames, filenames) 3-tuples, excluding files,
    directories and symlinks on the fly via ``self.is_excluded``.
    """
    for root, dirs, files in os.walk(self.path, topdown=True):
        # TODO: relative walk, recursive call if root excluder found???
        # root_excluder = get_root_excluder(root)
        kept_dirs = []
        for dirname in list(dirs):
            full_dir = os.path.join(root, dirname)
            if self.is_excluded(full_dir):
                # Prune in place so os.walk does not descend into it.
                dirs.remove(dirname)
            elif not os.path.islink(full_dir):
                kept_dirs.append(dirname)
        kept_files = []
        for fpath in (os.path.join(root, f) for f in files):
            if not self.is_excluded(fpath) and not os.path.islink(fpath):
                kept_files.append(os.path.relpath(fpath, root))
        yield root, kept_dirs, kept_files
def create_hierarchy(self, *args, **kwargs):
    """Pass through to provider ``HierarchyAdminSession.create_hierarchy``."""
    # Implemented from kitosid template for
    # osid.resource.BinAdminSession.create_bin
    session = self._get_provider_session('hierarchy_admin_session')
    return Hierarchy(self._provider_manager,
                     session.create_hierarchy(*args, **kwargs),
                     self._runtime,
                     self._proxy)
def p_label_list_list(p):
    """label_list : label_list COMMA ID
    | label_list COMMA NUMBER"""
    # PLY grammar action: the docstring IS the grammar rule, so its exact
    # text is behavior-bearing (the scraped copy had spaces inserted
    # around the underscores in 'label_list', which would break parsing).
    # Extend the existing label list with a new label built from the
    # ID/NUMBER token and its line number.
    p[0] = p[1]
    entry = check_and_make_label(p[3], p.lineno(3))
    p[1].append(entry)
def _get_zeropoint(expnum, ccd, prefix=None, version='p'):
    """Retrieve the zeropoint stored in the tags associated with this image.

    @param expnum: Exposure number
    @param ccd: ccd of the exposure
    @param prefix: possible prefix (such as 'fk'); deprecated and unused
    @param version: which version: p, s, or o?
    @return: zeropoint
    """
    if prefix is not None:
        # BUG FIX: the original merely instantiated a DeprecationWarning
        # object without emitting it; actually warn the caller.
        import warnings
        warnings.warn(
            "Prefix is no longer used here as the 'fk' and 's' have the same zeropoint.",
            DeprecationWarning)
    key = "zeropoint_{:1s}{:02d}".format(version, int(ccd))
    return get_tag(expnum, key)
def patronymic(self, gender: Gender = None) -> str:
    """Generate a random patronymic name.

    :param gender: Gender of person.
    :return: Patronymic name.

    :Example:
        Алексеевна.
    """
    gender = self._validate_enum(gender, Gender)
    candidates = self._data['patronymic'][gender]
    return self.random.choice(candidates)
def run_daemon(self):
    """Used as daemon starter.

    Dispatches the daemon runner action and routes failures to the
    corresponding handler hooks.

    Warning:
        DO NOT OVERRIDE THIS.
    """
    try:
        self.daemon_runner.do_action()
    except daemon.runner.DaemonRunnerStopFailureError:
        self.onStopFail()
    except SystemExit:
        self.onExit()
def get_downbeat_steps(self):
    """Return the indices of time steps that contain downbeats.

    Returns
    -------
    downbeat_steps : list
        The indices of time steps that contain downbeats; empty when no
        downbeat information is available.
    """
    if self.downbeat is None:
        return []
    return np.nonzero(self.downbeat)[0].tolist()
def connect_ssl(cls, user, password, endpoints, ca_certs=None, validate=None):
    """Create an SSL transport to the first endpoint (aserver) to which
    we successfully connect.

    ``endpoints`` may be a single endpoint string or a list of them.
    """
    # FIX: `basestring` only exists on Python 2 and raises NameError on
    # Python 3; `str` covers the single-endpoint convenience case.
    if isinstance(endpoints, str):
        endpoints = [endpoints]
    transport = SingleEndpointTransport(SocketTransport.connect_ssl, endpoints,
                                        ca_certs=ca_certs, validate=validate)
    return cls(transport, user, password)
def split(self, location, distance):
    """:returns: (close_sites, far_sites)"""
    if distance is None:
        # No cutoff given: every site counts as close.
        return self, None
    is_close = location.distance_to_mesh(self) < distance
    return self.filter(is_close), self.filter(~is_close)
def load_or_create_vocabs(source_paths: List[str],
                          target_path: str,
                          source_vocab_paths: List[Optional[str]],
                          target_vocab_path: Optional[str],
                          shared_vocab: bool,
                          num_words_source: Optional[int],
                          word_min_count_source: int,
                          num_words_target: Optional[int],
                          word_min_count_target: int,
                          pad_to_multiple_of: Optional[int] = None) -> Tuple[List[Vocab], Vocab]:
    """Return vocabularies for source files (including factors) and target.

    If the respective vocabulary paths are not None, the vocabulary is read
    from the path and returned.  Otherwise, it is built from the data and
    saved to the path.

    :param source_paths: The path to the source text (and optional token-parallel factor files).
    :param target_path: The target text.
    :param source_vocab_paths: The source vocabulary path (and optional factor vocabulary paths).
    :param target_vocab_path: The target vocabulary path.
    :param shared_vocab: Whether the source and target vocabularies are shared.
    :param num_words_source: Number of words in the source vocabulary.
    :param word_min_count_source: Minimum frequency of words in the source vocabulary.
    :param num_words_target: Number of words in the target vocabulary.
    :param word_min_count_target: Minimum frequency of words in the target vocabulary.
    :param pad_to_multiple_of: If not None, pads the vocabularies to a size that is
        the next multiple of this int.
    :return: List of source vocabularies (for source and factors), and target vocabulary.
    """
    source_path, *source_factor_paths = source_paths
    source_vocab_path, *source_factor_vocab_paths = source_vocab_paths

    logger.info("=============================")
    logger.info("Loading/creating vocabularies")
    logger.info("=============================")
    logger.info("(1) Surface form vocabularies (source & target)")

    if shared_vocab:
        if source_vocab_path and target_vocab_path:
            # Both given: load both and verify they really are identical.
            vocab_source = vocab_from_json(source_vocab_path)
            vocab_target = vocab_from_json(target_vocab_path)
            utils.check_condition(
                are_identical(vocab_source, vocab_target),
                "Shared vocabulary requires identical source and target vocabularies. "
                "The vocabularies in %s and %s are not identical." % (source_vocab_path, target_vocab_path))
        elif source_vocab_path is None and target_vocab_path is None:
            # Neither given: build one shared vocabulary from both corpora.
            utils.check_condition(
                num_words_source == num_words_target,
                "A shared vocabulary requires the number of source and target words to be the same.")
            utils.check_condition(
                word_min_count_source == word_min_count_target,
                "A shared vocabulary requires the minimum word count for source and target "
                "to be the same.")
            vocab_source = vocab_target = build_from_paths(
                paths=[source_path, target_path],
                num_words=num_words_source,
                min_count=word_min_count_source,
                pad_to_multiple_of=pad_to_multiple_of)
        else:
            # Exactly one given: reuse it for both sides.
            vocab_path = source_vocab_path if source_vocab_path is not None else target_vocab_path
            logger.info("Using %s as a shared source/target vocabulary." % vocab_path)
            vocab_source = vocab_target = vocab_from_json(vocab_path)
    else:
        vocab_source = load_or_create_vocab(source_path, source_vocab_path,
                                            num_words_source, word_min_count_source,
                                            pad_to_multiple_of=pad_to_multiple_of)
        vocab_target = load_or_create_vocab(target_path, target_vocab_path,
                                            num_words_target, word_min_count_target,
                                            pad_to_multiple_of=pad_to_multiple_of)

    vocab_source_factors = []  # type: List[Vocab]
    if source_factor_paths:
        logger.info("(2) Additional source factor vocabularies")
        # Source factor vocabs are always created.
        for factor_path, factor_vocab_path in zip(source_factor_paths, source_factor_vocab_paths):
            vocab_source_factors.append(
                load_or_create_vocab(factor_path, factor_vocab_path,
                                     num_words_source, word_min_count_source))

    return [vocab_source] + vocab_source_factors, vocab_target
def return_markers(self, state='MicromedCode'):
    """Return all the markers (also called triggers or events).

    Returns
    -------
    list of dict
        Each dict contains 'name' as str, and 'start' and 'end' as float
        in seconds from the start of the recordings.

    Raises
    ------
    FileNotFoundError
        when it cannot read the events for some reason (don't use other
        exceptions).
    """
    try:
        all_states = self._read_states()
    except ValueError:
        # cryptic error when reading states
        return []
    try:
        state_values = all_states[state]
    except KeyError:
        return []
    # Boundaries where the state value changes, plus the two ends.
    edges = hstack((0, where(diff(state_values))[0] + 1, len(state_values)))
    markers = []
    for start_idx, end_idx in zip(edges[:-1], edges[1:]):
        markers.append({'name': str(state_values[start_idx]),
                        'start': start_idx / self.s_freq,
                        'end': end_idx / self.s_freq,
                        })
    return markers
def bump(args: argparse.Namespace) -> None:
    """Increment the version integer of the user's choice.

    Bound to the 'bump' sub-command; prints the bumped version
    ('major', 'minor', or 'patch').  When no git tags exist yet, prints
    the initial version 0.1.0 and exits.

    :args: An argparse.Namespace object.
    """
    try:
        last_tag = last_git_release_tag(git_tags())
    except NoGitTagsException:
        print(SemVer(0, 1, 0))
        exit(0)
    last_ver = git_tag_to_semver(last_tag)
    bumper_name = {'patch': 'bump_patch',
                   'minor': 'bump_minor',
                   'major': 'bump_major'}.get(args.type)
    if bumper_name:
        print(getattr(last_ver, bumper_name)())
def remove_repo_from_team(self, auth, team_id, repo_name):
    """Remove repo from team.

    :param auth.Authentication auth: authentication object, must be admin-level
    :param str team_id: Team's id
    :param str repo_name: Name of the repo to be removed from the team
    :raises NetworkFailure: if there is an error communicating with the server
    :raises ApiFailure: if the request cannot be serviced
    """
    self.delete("/admin/teams/{t}/repos/{r}".format(t=team_id, r=repo_name),
                auth=auth)
def first(sequence, message=None):
    """Return the first item in ``sequence``.

    If there aren't any, raise a ValueError with ``message`` (or a
    default describing the empty sequence).
    """
    for item in sequence:
        return item
    raise ValueError(message or ('Sequence is empty: %s' % sequence))
def complete(self, message, endpoint, return_to):
    """Process the OpenID message, using the specified endpoint and
    return_to URL as context.

    Handles any OpenID message sent to the return_to URL by dispatching
    on the message's 'mode' argument to a ``_complete_<mode>`` handler,
    falling back to the invalid-message handler.
    """
    mode = message.getArg(OPENID_NS, 'mode', '<No mode set>')
    handler = getattr(self, '_complete_' + mode, self._completeInvalid)
    return handler(message, endpoint, return_to)
def log_middleware(store):
    """Middleware that logs every action to the console as it is dispatched."""
    def middleware(next_dispatch):
        def dispatch_and_log(action):
            print('Dispatch Action:', action)
            return next_dispatch(action)
        return dispatch_and_log
    return middleware
def repercent_broken_unicode(path):
    """Re-percent-encode octets that are not legal UTF-8.

    As per section 3.2 of RFC 3987, step three of converting a URI into
    an IRI: any byte range of ``path`` that fails to decode as UTF-8 is
    percent-encoded, repeatedly, until the whole path decodes cleanly.
    """
    # originally from django.utils.encoding
    while True:
        try:
            return path.decode('utf-8')
        except UnicodeDecodeError as exc:
            fixed = quote(path[exc.start:exc.end], safe=b"/#%[]=:;$&()+,!?*@'~")
            path = path[:exc.start] + fixed.encode('ascii') + path[exc.end:]
def is_sharable(self):
    """Whether this is sharable: requires the sharable flag to be set and
    every button to be sharable itself (buttons might hold sensitive data).
    """
    return self.sharable and all(button.is_sharable() for button in self.buttons)
def calculate_residue_counts_perstrain(protein_pickle_path, outdir, pdbflex_keys_file, wt_pid_cutoff=None, force_rerun=False):
    """Write out a feather file for a PROTEIN counting amino acid
    occurrences for ALL STRAINS along with SUBSEQUENCES.
    """
    from collections import defaultdict
    from ssbio.protein.sequence.seqprop import SeqProp
    from ssbio.protein.sequence.properties.residues import _aa_property_dict_one

    log = logging.getLogger(__name__)

    protein_id = op.splitext(op.basename(protein_pickle_path))[0].split('_')[0]
    protein_df_outfile = op.join(outdir, '{}_protein_strain_properties.fthr'.format(protein_id))

    if ssbio.utils.force_rerun(flag=force_rerun, outfile=protein_df_outfile):
        protein = ssbio.io.load_pickle(protein_pickle_path)
        # Disorder predictions must exist before subsequence extraction.
        protein.get_all_disorder_predictions(representative_only=True)
        all_protein_subseqs = protein.get_all_subsequences(pdbflex_keys_file=pdbflex_keys_file)
        if not all_protein_subseqs:
            log.error('{}: cannot run subsequence calculator'.format(protein.id))
            return

        # One property dictionary per strain.
        strain_to_infodict = defaultdict(dict)
        for seqprop in protein.sequences:
            if seqprop.id == protein.representative_sequence.id:
                strain_id = 'K12'
            elif type(seqprop) == SeqProp and seqprop.id != protein.id:
                # Filters out other KEGGProps or UniProtProps; this split
                # should work for all strains.
                strain_id = seqprop.id.split('_', 1)[1]
            else:
                continue

            # Additional filtering for genes marked as orthologous but that
            # actually have large deletions.  TODO: experiment with other cutoffs?
            if wt_pid_cutoff:
                aln = protein.sequence_alignments.get_by_id(
                    '{0}_{0}_{1}'.format(protein.id, seqprop.id))
                if aln.annotations['percent_identity'] < wt_pid_cutoff:
                    continue

            ###### Calculate "all" properties #####
            seqprop.get_biopython_pepstats()
            # May not have run if there are weird amino acids in the sequence.
            if 'amino_acids_percent-biop' not in seqprop.annotations:
                log.warning('Protein {}, sequence {}: skipping, unable to run Biopython ProteinAnalysis'.format(protein.id, seqprop.id))
                continue
            # [ALL] aa_count
            strain_to_infodict[strain_id].update(
                {'aa_count_{}'.format(k): v
                 for k, v in seqprop.annotations['amino_acids_content-biop'].items()})
            # [ALL] aa_count_total
            strain_to_infodict[strain_id]['aa_count_total'] = seqprop.seq_len

            ###### Calculate subsequence properties #####
            for prop, propdict in all_protein_subseqs.items():
                strain_to_infodict[strain_id].update(
                    protein.get_subseq_props(property_dict=propdict,
                                             property_name=prop,
                                             seqprop=seqprop))

        protein_df = pd.DataFrame(strain_to_infodict)
        protein_df.reset_index().to_feather(protein_df_outfile)
    # NOTE(review): returns the outfile path even on the cached (no-rerun)
    # path, matching the original control flow.
    return protein_pickle_path, protein_df_outfile
def _translate_ldm(self, oprnd1, oprnd2, oprnd3):
    """Return a formula representation of a LDM instruction.

    ``oprnd1`` holds the memory address (must match the translator's
    address size); ``oprnd3`` is the destination whose bytes are
    constrained to the current memory state.
    """
    assert oprnd1.size and oprnd3.size
    assert oprnd1.size == self._address_size

    addr_var = self._translate_src_oprnd(oprnd1)
    dst_var, dst_constraints = self._translate_dst_oprnd(oprnd3)

    # Constrain each byte of the destination to the byte stored at the
    # corresponding address, highest byte first.
    exprs = [self._mem_curr[addr_var + i // 8] == smtfunction.extract(dst_var, i, 8)
             for i in reversed(range(0, oprnd3.size, 8))]
    return exprs + dst_constraints
def get_nearest_forecast_site(self, latitude=None, longitude=None):
    """Return the nearest forecast Site object to the given coordinates.

    Prints an error and returns False when either coordinate is missing.

    :raises APIException: when no site lies within 30km (or no sites
        are available at all).
    """
    if longitude is None:
        # BUG FIX: the message previously said 'No latitude given.' for a
        # missing longitude.
        print('ERROR: No longitude given.')
        return False
    if latitude is None:
        print('ERROR: No latitude given.')
        return False
    nearest = False
    distance = None
    sites = self.get_forecast_sites()
    # NOTE(review): get_forecast_sites() has been observed to return None
    # (see original comment); guard so the loop does not raise TypeError.
    for site in sites or []:
        new_distance = self._distance_between_coords(
            float(site.longitude), float(site.latitude),
            float(longitude), float(latitude))
        if distance is None or new_distance < distance:
            distance = new_distance
            nearest = site
    # If no site was found, or the nearest one is more than 30km away,
    # raise an error.  (Previously `None > 30` raised TypeError when the
    # site list was empty.)
    if distance is None or distance > 30:
        raise APIException("There is no site within 30km.")
    return nearest
def run(self):
    """Tag the currently active git commit id with the next release tag id.

    Skips tagging (with a log message) when the tag already exists; only
    pushes when not in dry-run mode.
    """
    sha = VersionUtils.run_git_command(["rev-parse", "HEAD"], self.git_dir)
    tag = self.distribution.get_version()
    if self.has_tag(tag, sha):
        existing_sha = VersionUtils.run_git_command(["rev-parse", tag], self.git_dir)
        if sha != existing_sha:
            logger.error("git tag {0} sha does not match the sha requesting to be tagged, you need to increment the version number, Skipped Tagging!".format(tag))
        else:
            logger.info("git tag {0} already exists for this repo, Skipped Tagging!".format(tag))
        return
    logger.info("Adding tag {0} for commit {1}".format(tag, sha))
    if not self.dry_run:
        VersionUtils.run_git_command(["tag", "-m", '""', tag, sha],
                                     self.git_dir, throw_on_error=True)
        logger.info("Pushing tag {0} to remote {1}".format(tag, self.remote))
        VersionUtils.run_git_command(["push", self.remote, tag],
                                     self.git_dir, throw_on_error=True)
def setExpertLevel(self):
    """Set expert level.

    Reads the configured 'expert_level' and toggles whether the panel
    buttons are permanently enabled (expert) or not.
    """
    g = get_root(self).globals
    level = g.cpars['expert_level']
    buttons = (self.load, self.save, self.unfreeze, self.start, self.stop)
    # Buttons are permanently enabled only at level 2.
    if level in (0, 1):
        for button in buttons:
            button.setNonExpert()
    elif level == 2:
        for button in buttons:
            button.setExpert()
def fake2db_couchdb_initiator(self, number_of_rows, name=None, custom=None):
    '''Main handler for the operation: create the db and run the fillers.'''
    rows = number_of_rows
    db = self.database_caller_creator(name)
    if custom:
        # Custom schema requested: fill it and stop.
        self.custom_db_creator(rows, db, custom)
        sys.exit(0)
    for filler in (self.data_filler_simple_registration,
                   self.data_filler_detailed_registration,
                   self.data_filler_company,
                   self.data_filler_user_agent,
                   self.data_filler_customer):
        filler(rows, db)
def server_close(self):
    """Close the socket server and any associated resources.

    Closes the TCP server socket, the queue and topic managers, the
    authenticator (when it supports close), then shuts down the serve loop.
    """
    self.log.debug("Closing the socket server connection.")
    TCPServer.server_close(self)
    self.queue_manager.close()
    self.topic_manager.close()
    # The authenticator is optional about supporting close().
    if hasattr(self.authenticator, 'close'):
        self.authenticator.close()
    self.shutdown()
def load(*, name="dummy", options=None, dry_run=False, **kwargs):
    """Load a backup driver.

    :param name (str, optional): name of the backup driver to load
    :param options (dict, optional): A dictionary passed to the driver
    :param dry_run (bool, optional): Whether to activate dry run mode
    :param \\*\\*kwargs: arbitrary keyword arguments
    :raises ValueError: if specified driver does not exist
    """
    global _driver
    # FIX: replaced the mutable default argument `options={}` with the
    # None sentinel; callers that omitted it see identical behavior.
    if options is None:
        options = {}
    # Try to load the specified driver.
    if name in VALID_DRIVERS:
        # log the thing first
        log.msg_debug("Attempting to load driver: {d}".format(d=name))
        # Load the driver (which is actually a python module inside the
        # drivers directory).
        _driver = importlib.import_module(".drivers.{name}".format(name=name), __package__)
        if _driver:
            _driver.load(dry_run=dry_run, **options)
            log.msg_debug("Backup driver '{driver}'"
                          " has been loaded successfully!".format(driver=_driver.get_name()))
    else:
        raise ValueError("Invalid backup driver name: {driver}".format(driver=name))
def add_residue_mindist(self, residue_pairs='all', scheme='closest-heavy', ignore_nonprotein=True, threshold=None, periodic=True):
    r"""Adds the minimum distance between residues to the feature list.

    If the topology generated out of :py:obj:`topfile` contains
    information on periodic boundary conditions, the minimum image
    convention will be used when computing distances.

    Parameters
    ----------
    residue_pairs : can be of two types:
        'all'
            Computes distances between all pairs of residues excluding
            first and second neighbors
        ndarray((n, 2), dtype=int):
            n x 2 array with the pairs of residues for which distances
            will be computed
    scheme : 'ca', 'closest', 'closest-heavy', default is closest-heavy
        Within a residue, determines the sub-group atoms that will be
        considered when computing distances
    ignore_nonprotein : boolean, default True
        Ignore residues that are not of protein type (e.g. water
        molecules, post-translational modifications etc)
    threshold : float, optional, default is None
        distances below this threshold (in nm) will result in a feature
        1.0, distances above will result in 0.0. If left to None, the
        numerical value will be returned
    periodic : bool, optional, default = True
        If `periodic` is True and the trajectory contains unitcell
        information, the minimum image convention is used.

    .. note::
        Using :py:obj:`scheme` = 'closest' or 'closest-heavy' with
        :py:obj:`residue_pairs` = 'all' will compute nearly all
        interatomic distances, for every frame, before extracting the
        closest pairs. This can be very time consuming. Those schemes
        are intended to be used with a subset of residues chosen via
        :py:obj:`residue_pairs`.
    """
    from .distances import ResidueMinDistanceFeature
    # Warn ahead of time: 'all' pairs with a closest-* scheme computes
    # nearly all interatomic distances per frame (see docstring note).
    if scheme != 'ca' and is_string(residue_pairs):
        if residue_pairs == 'all':
            self.logger.warning("Using all residue pairs with schemes like closest or closest-heavy is "
                                "very time consuming. Consider reducing the residue pairs")
    f = ResidueMinDistanceFeature(self.topology, residue_pairs, scheme, ignore_nonprotein, threshold, periodic)
    self.__add_feature(f)
def has_delete_permission(self, request, obj=None):
    """Fixed (default) namespaces can never be deleted."""
    if obj is None or not obj.fixed:
        # Not a fixed namespace: defer to the standard admin check.
        return super(NamespaceAdmin, self).has_delete_permission(request, obj)
    return False
def __signal(self, sig, verbose=None):
    '''Helper preventing code duplication when sending signals.

    :param sig:
        Signal to use (e.g. "HUP", "ALRM")
    :param verbose:
        Overwrite :func:`photon.Photon.m`'s `verbose`
    :returns:
        :func:`photon.Photon.m`'s result of killing `pid`
    '''
    # Shells out to `kill -<sig> <pid>`, optionally prefixed with a
    # sudo command stored on the instance.
    return self.m(
        'killing process %s with "%s"' % (self.__pid, sig),
        cmdd=dict(cmd='%s kill -%s %d' % (self.__sudo, sig, self.__pid)),
        verbose=verbose
    )
def parse_file(self, filename):
    """Load self from the file, such as "MANIFEST.MF" or "SIGNATURE.SF".

    :param filename: contains UTF-8 encoded manifest
    """
    # Read the whole manifest as bytes, then hand it to the parser.
    with open(filename, "rb", _BUFFERING) as stream:
        raw = stream.read()
    self.parse(raw)
def get_headers(self, container):
    """Return the headers for the specified container."""
    uri = "/%s" % utils.get_name(container)
    head_resp, _body = self.api.method_head(uri)
    return head_resp.headers
def _get_mean ( self , data , dctx , dists ) :
"""Returns the mean intensity measure level from the tables
: param data :
The intensity measure level vector for the given magnitude and IMT
: param key :
The distance type
: param distances :
The distance vector for the given magnitude and IMT""" | # For values outside of the interpolation range use - 999 . to ensure
# value is identifiable and outside of potential real values
interpolator_mean = interp1d ( dists , data , bounds_error = False , fill_value = - 999. )
mean = interpolator_mean ( getattr ( dctx , self . distance_type ) )
# For those distances less than or equal to the shortest distance
# extrapolate the shortest distance value
mean [ getattr ( dctx , self . distance_type ) < ( dists [ 0 ] + 1.0E-3 ) ] = data [ 0 ]
# For those distances significantly greater than the furthest distance
# set to 1E - 20.
mean [ getattr ( dctx , self . distance_type ) > ( dists [ - 1 ] + 1.0E-3 ) ] = 1E-20
# If any distance is between the final distance and a margin of 0.001
# km then assign to smallest distance
mean [ mean < - 1. ] = data [ - 1 ]
return mean |
def start(st_reg_number):
    """Checks the number validity for the Minas Gerais state.

    ``st_reg_number`` must be a 13-character string of digits; the last
    two characters are check digits. Returns True only when both check
    digits verify.
    """
    # st_reg_number = str(st_reg_number)
    # First-digit base: insert a '0' after the third character and drop
    # the two trailing check digits.
    number_state_registration_first_digit = st_reg_number[0:3] + '0' + st_reg_number[3:len(st_reg_number) - 2]
    weights_first_digit = [1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2]
    # NOTE(review): 'wights' is a typo in the local name, kept as-is.
    wights_second_digit = [3, 2, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2]
    first_digit = st_reg_number[-2]
    second_digit = st_reg_number[-1]
    sum_first_digit = 0
    sum_second_digit = 0
    sum_result_digit = ''
    sum_end = 0
    if len(st_reg_number) != 13:
        return False
    # Concatenate the decimal digits of every weighted product into a
    # string, then sum the individual digits of that string.
    for i in range(0, 12):
        sum_first_digit = weights_first_digit[i] * int(number_state_registration_first_digit[i])
        sum_result_digit = sum_result_digit + str(sum_first_digit)
    for i in range(0, len(sum_result_digit)):
        sum_end = sum_end + int(sum_result_digit[i])
    # Map the digit-sum to the first check digit (distance to the next
    # multiple of ten).
    if sum_end % 10 == 0:
        check_digit_one = 0
    elif sum_end < 10:
        check_digit_one = 10 - sum_end
    elif sum_end > 10:
        check_digit_one = (10 - sum_end % 10)
    if str(check_digit_one) != first_digit:
        return False
    # Second check digit is computed over the number with the first
    # check digit appended.
    number_state_registration_second_digit = st_reg_number + str(check_digit_one)
    for i in range(0, 12):
        sum_second_digit = sum_second_digit + wights_second_digit[i] * int(number_state_registration_second_digit[i])
    check_second_digit = 11 - sum_second_digit % 11
    # NOTE(review): this compares the full weighted *sum* to 0/1 rather
    # than a remainder; the usual IE check-digit convention maps
    # remainders 10/11 to digit 0 -- confirm against the official
    # IE/MG specification.
    if sum_second_digit == 1 or sum_second_digit == 0:
        return second_digit == '0'
    else:
        return str(check_second_digit) == second_digit
def get_config_path(appdirs=DEFAULT_APPDIRS, file_name=DEFAULT_CONFIG_FILENAME):
    """Return the path where the config file is stored.

    Args:
        appdirs (optional): ``appdirs``-like object exposing a
            ``user_config_dir`` attribute; defaults to the module-wide
            application directories.
        file_name (text_type, optional): Name of the config file.
            Defaults to ``config.conf``.

    Returns:
        str: Fully qualified path (dir & filename) where we expect the
        config file.
    """
    config_dir = appdirs.user_config_dir
    return os.path.join(config_dir, file_name)
def get_profile_for_user(user):
    """Returns site-specific profile for this user. Raises
    ``ProfileNotConfigured`` if ``settings.ACCOUNTS_PROFILE_MODEL`` is not
    set, and ``ImproperlyConfigured`` if the corresponding model can't
    be found.
    """
    # Cache the profile on the user object so repeated calls resolve it
    # only once per user instance.
    if not hasattr(user, '_yacms_profile'):
        # Raises ProfileNotConfigured if not bool(ACCOUNTS_PROFILE_MODEL)
        profile_model = get_profile_model()
        # Query against the same database the user instance came from.
        profile_manager = profile_model._default_manager.using(user._state.db)
        user_field = get_profile_user_fieldname(profile_model, user.__class__)
        # Create the profile on first access if it does not exist yet.
        profile, created = profile_manager.get_or_create(**{user_field: user})
        # Reuse the in-memory user instance on the profile.
        profile.user = user
        user._yacms_profile = profile
    return user._yacms_profile
def _relevant_connections(self, subsystem):
    """Identify connections that "matter" to this concept.

    For a |MIC|, the important connections are those which connect the
    purview to the mechanism; for a |MIE| they are the connections from
    the mechanism to the purview.

    ``direction == Direction.CAUSE``:
        ``relevant_connections[i, j]`` is ``1`` if node ``i`` is in the
        cause purview and node ``j`` is in the mechanism (``0`` otherwise).
    ``direction == Direction.EFFECT``:
        ``relevant_connections[i, j]`` is ``1`` if node ``i`` is in the
        mechanism and node ``j`` is in the effect purview (``0`` otherwise).

    Args:
        subsystem (Subsystem): The |Subsystem| of this MICE.

    Returns:
        np.ndarray: An |N x N| matrix of connections, where |N| is the
        size of the network.

    Raises:
        ValueError: If ``direction`` is invalid.
    """
    # direction.order() arranges (mechanism, purview) into (from, to).
    source_nodes, sink_nodes = self.direction.order(self.mechanism, self.purview)
    return connectivity.relevant_connections(subsystem.network.size, source_nodes, sink_nodes)
def pod_present(name, namespace='default', metadata=None, spec=None, source='', template='', **kwargs):
    '''Ensures that the named pod is present inside of the specified
    namespace with the given metadata and spec.
    If the pod exists it will be replaced.

    name
        The name of the pod.
    namespace
        The namespace holding the pod. The 'default' one is going to be
        used unless a different one is specified.
    metadata
        The metadata of the pod object.
    spec
        The spec of the pod object.
    source
        A file containing the definition of the pod (metadata and
        spec) in the official kubernetes format.
    template
        Template engine to be used to render the source file.
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
    # 'source' is a complete pod definition, so it is mutually
    # exclusive with explicit 'metadata'/'spec'.
    if (metadata or spec) and source:
        return _error(ret, '\'source\' cannot be used in combination with \'metadata\' or '
                           '\'spec\'')
    if metadata is None:
        metadata = {}
    if spec is None:
        spec = {}
    pod = __salt__['kubernetes.show_pod'](name, namespace, **kwargs)
    if pod is None:
        # Pod does not exist yet: create it (or just report in test mode).
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'The pod is going to be created'
            return ret
        res = __salt__['kubernetes.create_pod'](name=name, namespace=namespace, metadata=metadata,
                                                spec=spec, source=source, template=template,
                                                saltenv=__env__, **kwargs)
        ret['changes']['{0}.{1}'.format(namespace, name)] = {'old': {}, 'new': res}
    else:
        # Pod already exists: replacement is not supported, so this
        # branch always returns a failure result.
        if __opts__['test']:
            ret['result'] = None
            return ret
        # TODO: fix replace_namespaced_pod validation issues
        ret['comment'] = 'salt is currently unable to replace a pod without ' \
                         'deleting it. Please perform the removal of the pod requiring ' \
                         'the \'pod_absent\' state if this is the desired behaviour.'
        ret['result'] = False
        return ret
    # NOTE: this overwrites the per-pod changes recorded above with the
    # requested metadata/spec (only reached on the create path).
    ret['changes'] = {'metadata': metadata, 'spec': spec}
    ret['result'] = True
    return ret
def hostname(self):
    """Get the hostname that this connection is associated with.

    Any ``:port`` suffix is stripped from the URL's network location.
    """
    # Import from the standard library directly instead of six.moves
    # (six.moves.urllib.parse is exactly urllib.parse on Python 3).
    try:
        from urllib.parse import urlparse
    except ImportError:  # pragma: no cover - Python 2 fallback
        from urlparse import urlparse
    return urlparse(self._base_url).netloc.split(':', 1)[0]
def _getattrs(self, obj, *attrs):
    """Return dictionary of given attrs on given object, if they exist,
    processing through _filter_value().
    """
    return {
        attr: obj_to_string(self._filter_value(getattr(obj, attr)))
        for attr in attrs
        if hasattr(obj, attr)
    }
def init_db(self):
    '''init_db for the filesystem ensures that the base folder (named
    according to the studyid) exists.'''
    self.session = None
    # Create the base data folder, then the per-study folder inside it.
    base = self.data_base
    if not os.path.exists(base):
        mkdir_p(base)
    self.database = "%s/%s" % (base, self.study_id)
    if not os.path.exists(self.database):
        mkdir_p(self.database)
def safe_reload(modname: types.ModuleType) -> Union[None, str]:
    """Catch and log any errors that arise from reimporting a module, but do not die.

    :return: None when import was successful. String is the first line of the error message
    """
    try:
        importlib.reload(modname)
    except Exception as exc:
        # Deliberately broad: any reload failure is logged, never fatal.
        logging.error("Failed to reimport module: %s", modname)
        first_line, _ = backtrace.output_traceback(exc)
        return first_line
    return None
def count(self, other, r, attrs=None, info=None):
    """Gray & Moore based fast dual tree counting.

    r is the edge of bins:
        -inf or r[i-1] < count[i] <= r[i]

    attrs: None or tuple
        if tuple, attrs = (attr_self, attr_other)

    Returns: count,
        count, weight if attrs is not None
    """
    # A fresh dict per call replaces the old shared mutable default
    # (``info={}``), which could leak state between calls.
    if info is None:
        info = {}
    r = numpy.array(r, dtype='f8')
    return _core.KDNode.count(self, other, r, attrs, info=info)
def _new_url(self, relative_url):
    """Create new Url which points to new url."""
    absolute = urljoin(self._base_url, relative_url)
    return Url(absolute, **self._default_kwargs)
def modify_password(self, username, newpwd, oldpwd):
    """Change a user's password on the device.

    Params:
        username - user name
        newpwd - new password
        oldpwd - old password

    NOTE(review): credentials travel in the query string of the CGI
    call; confirm the transport is protected (HTTPS).
    """
    query = ('userManager.cgi?action=modifyPassword&name={0}&pwd={1}'
             '&pwdOld={2}'.format(username, newpwd, oldpwd))
    response = self.command(query)
    return response.content.decode('utf-8')
def getWmAllowedActions(self, win, str=False):
    """Get the list of allowed actions for the given window (property
    _NET_WM_ALLOWED_ACTIONS).

    :param win: the window object
    :param str: True to get a list of string allowed actions instead of
        int (note: this parameter shadows the ``str`` builtin; kept for
        backward compatibility)
    :return: list of (int|str)
    """
    actions = self._getProperty('_NET_WM_ALLOWED_ACTIONS', win) or []
    if str:
        # Resolve each atom to its human-readable name.
        return [self._getAtomName(atom) for atom in actions]
    return actions
def _make_tuple(x):
    """TF has an obnoxious habit of being lenient with single vs tuple.

    Normalizes ``x`` to a tuple: PrettyTensor sequences/tensors are
    unwrapped, tuples pass through, non-string sequences are converted,
    and anything else is wrapped as a 1-tuple.
    """
    if isinstance(x, prettytensor.PrettyTensor):
        if x.is_sequence():
            return tuple(x.sequence)
        else:
            return (x.tensor,)
    elif isinstance(x, tuple):
        return x
    # NOTE(review): collections.Sequence was removed in Python 3.10;
    # modern code should use collections.abc.Sequence. Kept as-is here
    # because this module supports Python 2 via six.
    elif (isinstance(x, collections.Sequence) and not isinstance(x, six.string_types)):
        return tuple(x)
    else:
        return (x,)
def _complex_response_to_error_adapter(self, body):
    """Convert a list of error responses.

    Returns a tuple ``(errors, meta)`` where ``errors`` is a list of
    ``ErrorDetails`` built from each entry's status/code/title.
    """
    meta = body.get('meta')
    details = [
        ErrorDetails(item['status'], item['code'], item['title'])
        for item in body.get('errors')
    ]
    return details, meta
def check_bitdepth_rescale(palette, bitdepth, transparent, alpha, greyscale):
    """Returns (bitdepth, rescale) pair.

    ``bitdepth`` arrives as a tuple of per-channel bitdepths; the result
    is the bitdepth to write plus an optional list of
    (source_bitdepth, target_bitdepth) rescale instructions.
    """
    if palette:
        # Paletted images: exactly one bitdepth of 1/2/4/8, and none of
        # the transparent/alpha/greyscale options are allowed.
        if len(bitdepth) != 1:
            raise ProtocolError("with palette, only a single bitdepth may be used")
        (bitdepth,) = bitdepth
        if bitdepth not in (1, 2, 4, 8):
            raise ProtocolError("with palette, bitdepth must be 1, 2, 4, or 8")
        if transparent is not None:
            raise ProtocolError("transparent and palette not compatible")
        if alpha:
            raise ProtocolError("alpha and palette not compatible")
        if greyscale:
            raise ProtocolError("greyscale and palette not compatible")
        return bitdepth, None
    # No palette, check for sBIT chunk generation.
    if greyscale and not alpha:
        # Single channel, L.
        (bitdepth,) = bitdepth
        if bitdepth in (1, 2, 4, 8, 16):
            return bitdepth, None
        # Round up to the nearest PNG-legal depth and record a rescale.
        if bitdepth > 8:
            targetbitdepth = 16
        elif bitdepth == 3:
            targetbitdepth = 4
        else:
            assert bitdepth in (5, 6, 7)
            targetbitdepth = 8
        return targetbitdepth, [(bitdepth, targetbitdepth)]
    assert alpha or not greyscale
    depth_set = tuple(set(bitdepth))
    if depth_set in [(8,), (16,)]:
        # All channels already share a legal depth: no sBIT required.
        (bitdepth,) = depth_set
        return bitdepth, None
    # Mixed/multi-channel depths: rescale every channel to a common
    # 8- or 16-bit target.
    targetbitdepth = (8, 16)[max(bitdepth) > 8]
    return targetbitdepth, [(b, targetbitdepth) for b in bitdepth]
def create_static_finding(self, application_id, vulnerability_type, description, severity,
                          parameter=None, file_path=None, native_id=None, column=None,
                          line_text=None, line_number=None):
    """Creates a static finding with given properties.

    :param application_id: Application identifier number.
    :param vulnerability_type: Name of CWE vulnerability.
    :param description: General description of the issue.
    :param severity: Severity level from 0-8.
    :param parameter: Request parameter for vulnerability.
    :param file_path: Location of source file.
    :param native_id: Specific identifier for vulnerability.
    :param column: Column number for finding vulnerability source.
    :param line_text: Specific line text of the vulnerability.
    :param line_number: Specific source line number of the vulnerability.
    :raises AttributeError: when neither parameter nor file_path is given.
    """
    if not parameter and not file_path:
        raise AttributeError('Static findings require either parameter or file_path to be present.')
    params = {
        'isStatic': True,
        'vulnType': vulnerability_type,
        'longDescription': description,
        'severity': severity,
    }
    # Only truthy optional fields are forwarded to the API.
    optional_fields = (
        ('nativeId', native_id),
        ('column', column),
        ('lineText', line_text),
        ('lineNumber', line_number),
    )
    for key, value in optional_fields:
        if value:
            params[key] = value
    return self._request('POST', 'rest/applications/' + str(application_id) + '/addFinding', params)
def respond(self, packet, peer, flags=0):
    """Send a message back to a peer.

    :param packet: The data to send
    :param peer: The address to send to, as a tuple (host, port)
    :param flags: Optional socket send flags (see ``socket.sendto``)
    """
    # Uses the three-argument sendto(bytes, flags, address) form.
    self.sock.sendto(packet, flags, peer)
def partial_predict(self, X, y=None):
    """Predict the closest cluster each sample in X belongs to.

    In the vector quantization literature, `cluster_centers_` is called
    the code book and each value returned by `predict` is the index of
    the closest code in the code book.

    Parameters
    ----------
    X : array-like or md.Trajectory, shape=(n_samples, n_features)
        A single timeseries.
    y : ignored
        Present for scikit-learn API compatibility only.

    Returns
    -------
    Y : array, shape=(n_samples,)
        Index of the cluster that each sample belongs to
    """
    if isinstance(X, md.Trajectory):
        # NOTE: center_coordinates() mutates X in place before the
        # prediction is delegated to the parent class.
        X.center_coordinates()
    return super(MultiSequenceClusterMixin, self).predict(X)
def tryValue(self, name, val, scope=''):
    """For the given item name (and scope), we are being asked to try
    the given value to see if it would pass validation. We are not
    to set it, but just try it. We return a tuple:
    If it fails, we return: (False, the last known valid value).
    On success, we return: (True, None).
    """
    # SIMILARITY BETWEEN THIS AND setParam() SHOULD BE CONSOLIDATED!
    # Set the value, even if invalid. It needs to be set before
    # the validation step (next).
    theDict, oldVal = findScopedPar(self, scope, name)
    if oldVal == val:
        # assume oldVal is valid
        return (True, None)
    theDict[name] = val
    # Check the proposed value. Ideally, we'd like to
    # (somehow elegantly) only check this one item. For now, the best
    # shortcut is to only validate this section.
    ans = self.validate(self._vtor, preserve_errors=True, section=theDict)
    # No matter what ans is, immediately return the item to its original
    # value since we are only checking the value here - not setting.
    theDict[name] = oldVal
    # Now see what the validation check said.
    # NOTE: validate() may return True, False, or a results dict, so
    # the explicit ``!= True`` / ``!= False`` comparisons are deliberate.
    errStr = ''
    if ans != True:
        flatStr = "All values are invalid!"
        if ans != False:
            flatStr = flattened2str(configobj.flatten_errors(self, ans))
        errStr = "Validation error: " + flatStr
        # for now this info is unused
    # Done
    if len(errStr):
        # was an error
        return (False, oldVal)
    else:
        return (True, None)
def export(export_path, vocabulary, embeddings, num_oov_buckets, preprocess_text):
    """Exports a TF-Hub module that performs embedding lookups.

    Args:
        export_path: Location to export the module.
        vocabulary: List of the N tokens in the vocabulary.
        embeddings: Numpy array of shape [N+K, M] - the first N rows are
            the M dimensional embeddings for the respective tokens and
            the next K rows are for the K out-of-vocabulary buckets.
        num_oov_buckets: How many out-of-vocabulary buckets to add.
        preprocess_text: Whether to preprocess the input tensor by
            removing punctuation and splitting on spaces.
    """
    # Write temporary vocab file for module construction.
    tmpdir = tempfile.mkdtemp()
    vocabulary_file = os.path.join(tmpdir, "tokens.txt")
    with tf.gfile.GFile(vocabulary_file, "w") as f:
        f.write("\n".join(vocabulary))
    vocab_size = len(vocabulary)
    embeddings_dim = embeddings.shape[1]
    spec = make_module_spec(vocabulary_file, vocab_size, embeddings_dim, num_oov_buckets, preprocess_text)
    try:
        # TF1-style graph/session export.
        with tf.Graph().as_default():
            m = hub.Module(spec)
            # The embeddings may be very large (e.g., larger than the 2GB serialized
            # Tensor limit). To avoid having them frozen as constant Tensors in the
            # graph we instead assign them through the placeholders and feed_dict
            # mechanism.
            p_embeddings = tf.placeholder(tf.float32)
            load_embeddings = tf.assign(m.variable_map[EMBEDDINGS_VAR_NAME], p_embeddings)
            with tf.Session() as sess:
                sess.run([load_embeddings], feed_dict={p_embeddings: embeddings})
                m.export(export_path, sess)
    finally:
        # Always clean up the temporary vocab directory.
        shutil.rmtree(tmpdir)
def create_item(self, name):
    """create a new named todo list

    :rtype: TodoListUX
    """
    new_list = self.app.create_item(name)
    return TodoListUX(ux=self, controlled_list=new_list)
def _WriteFileChunk(self, chunk):
    """Yields binary chunks, respecting archive file headers and footers.

    Args:
        chunk: the StreamedFileChunk to be written
    """
    if chunk.chunk_index == 0:
        # Make sure size of the original file is passed. It's required
        # when output_writer is StreamingTarWriter.
        st = os.stat_result((0o644, 0, 0, 0, 0, 0, chunk.total_size, 0, 0, 0))
        target_path = _ClientPathToString(chunk.client_path, prefix=self.prefix)
        yield self.archive_generator.WriteFileHeader(target_path, st=st)
    yield self.archive_generator.WriteFileChunk(chunk.data)
    if chunk.chunk_index == chunk.total_chunks - 1:
        # Last chunk: close out the archive entry and record the file.
        yield self.archive_generator.WriteFileFooter()
        self.archived_files.add(chunk.client_path)
def _bqschema_to_nullsafe_dtypes ( schema_fields ) :
"""Specify explicit dtypes based on BigQuery schema .
This function only specifies a dtype when the dtype allows nulls .
Otherwise , use pandas ' s default dtype choice .
See : http : / / pandas . pydata . org / pandas - docs / dev / missing _ data . html
# missing - data - casting - rules - and - indexing""" | # If you update this mapping , also update the table at
# ` docs / source / reading . rst ` .
dtype_map = { "FLOAT" : np . dtype ( float ) , # pandas doesn ' t support timezone - aware dtype in DataFrame / Series
# constructors . It ' s more idiomatic to localize after construction .
# https : / / github . com / pandas - dev / pandas / issues / 25843
"TIMESTAMP" : "datetime64[ns]" , "TIME" : "datetime64[ns]" , "DATE" : "datetime64[ns]" , "DATETIME" : "datetime64[ns]" , }
dtypes = { }
for field in schema_fields :
name = str ( field [ "name" ] )
if field [ "mode" ] . upper ( ) == "REPEATED" :
continue
dtype = dtype_map . get ( field [ "type" ] . upper ( ) )
if dtype :
dtypes [ name ] = dtype
return dtypes |
def delete_entity_type(namespace, workspace, etype, ename):
    """Delete entities in a workspace.

    Note: This action is not reversible. Be careful!

    Args:
        namespace (str): project to which workspace belongs
        workspace (str): Workspace name
        etype (str): Entity type
        ename (str, or iterable of str): unique entity id(s)

    Swagger:
        https://api.firecloud.org/#!/Entities/deleteEntities
    """
    uri = "workspaces/{0}/{1}/entities/delete".format(namespace, workspace)
    # A single id is wrapped; any other iterable is expanded.
    if isinstance(ename, string_types):
        payload = [{"entityType": etype, "entityName": ename}]
    elif isinstance(ename, Iterable):
        payload = [{"entityType": etype, "entityName": entity} for entity in ename]
    return __post(uri, json=payload)
def price_projection(price_data=price_data(), ex_best_offers_overrides=ex_best_offers_overrides(), virtualise=True, rollover_stakes=False):
    """Selection criteria of the returning price data.

    :param list price_data: PriceData filter to specify what market data we wish to receive.
    :param dict ex_best_offers_overrides: define order book depth, rollup method.
    :param bool virtualise: whether to receive virtualised prices also.
    :param bool rollover_stakes: whether to accumulate volume at each price as sum of volume at
        that price and all better prices.
    :returns: price data criteria for market data.
    :rtype: dict
    """
    # Explicit mapping of the four arguments (replaces the locals() trick,
    # which silently depended on being the first statement of the body).
    selected = {
        'price_data': price_data,
        'ex_best_offers_overrides': ex_best_offers_overrides,
        'virtualise': virtualise,
        'rollover_stakes': rollover_stakes,
    }
    return {to_camel_case(key): value for key, value in selected.items() if value is not None}
def node_branch(self, astr_node, abranch):
    """Adds a branch to a node, i.e. depth addition. The given
    node's md_nodes is set to the abranch's mdict_branch.

    :param astr_node: key of the target node in this tree's ``dict_branch``
    :param abranch: object exposing a ``dict_branch`` mapping to graft
    """
    # Graft abranch's dictionary under the named node.
    self.dict_branch[astr_node].node_dictBranch(abranch.dict_branch)
def evaluate(molecules, ensemble_chunk, sort_order, options, output_queue=None):
    """Evaluate VS performance of each ensemble in ensemble_chunk.

    Results map each ensemble key (a tuple of receptor names, e.g.
    ('receptor_1', ..., 'receptor_n')) to its ensemble storage object.
    When ``output_queue`` is given the results are put on the queue
    (worker-process mode); otherwise they are returned directly.
    """
    results = {
        ensemble: calculate_performance(molecules, ensemble, sort_order, options)
        for ensemble in ensemble_chunk
    }
    if output_queue is not None:
        output_queue.put(results)
    else:
        return results
def setRefreshBlocked(self, blocked):
    """Set to True to indicate that the configuration should not be updated.

    This setting is part of the model so that it is shared by all CTIs.
    Returns the old value.
    """
    previous = self._refreshBlocked
    logger.debug("Setting refreshBlocked from {} to {}".format(previous, blocked))
    self._refreshBlocked = blocked
    return previous
def create_release_id(short, version, type, bp_short=None, bp_version=None, bp_type=None):
    """Create release_id from given parts.

    :param short: Release short name
    :type short: str
    :param version: Release version
    :type version: str
    :param type: Release type (note: shadows the ``type`` builtin; name
        kept for backward compatibility)
    :type type: str
    :param bp_short: Base Product short name
    :type bp_short: str
    :param bp_version: Base Product version
    :type bp_version: str
    :param bp_type: Base Product type
    :rtype: str
    """
    # Validate each component with its matching validator.
    checks = (
        (is_valid_release_short, short, "Release short name is not valid: %s"),
        (is_valid_release_version, version, "Release short version is not valid: %s"),
        (is_valid_release_type, type, "Release type is not valid: %s"),
    )
    for validator, value, message in checks:
        if not validator(value):
            raise ValueError(message % value)
    # "ga" releases omit the type suffix.
    parts = [short, version] if type == "ga" else [short, version, type]
    result = "-".join(parts)
    if bp_short:
        # Append the base-product release id, built recursively.
        result += "@%s" % create_release_id(bp_short, bp_version, bp_type)
    return result
def wait_for_edge(self):
    """Block until the configured edge occurs on this pin.

    This will remove any callbacks you might have specified.
    """
    # Event detection and a blocking wait are mutually exclusive on a
    # pin, so any registered event detection is removed first.
    GPIO.remove_event_detect(self._pin)
    GPIO.wait_for_edge(self._pin, self._edge)
def _receive(self, root, directory, dirs, files, include, exclude):
    """Internal function processing each yield from os.walk.

    Returns (matched, include, exclude) where ``matched`` is the set of
    matching file names in this directory and the two FileSetStates are
    passed back down for the subdirectories.
    """
    self._received += 1
    if not self.symlinks:
        # Drop symlinked files when symlink traversal is disabled.
        where = root + os.path.sep + directory + os.path.sep
        files = [file_name for file_name in files if not os.path.islink(where + file_name)]
    # Thread the parent FileSetState down the tree; fall back to the
    # top-level include/exclude patterns when there is no parent state.
    include = FileSetState("Include", directory, include, None if include else self.include)
    exclude = FileSetState("Exclude", directory, exclude, None if exclude else self.exclude)
    if exclude.matches_all_files_all_subdirs():
        # Exclude everything and do not traverse any subdirectories.
        # (del dirs[0:] prunes in place so os.walk sees it.)
        del dirs[0:]
        matched = set()
    else:
        if include.no_possible_matches_in_subdirs():
            # Do not traverse any subdirectories.
            del dirs[0:]
        matched = include.match(set(files))
        matched -= exclude.match(matched)
    return matched, include, exclude
def _checkin_remote_bundle(self, remote, ref):
    """Checkin a remote bundle from a remote.

    :param remote: a Remote object
    :param ref: Any bundle reference
    :return: The vid of the loaded bundle
    """
    from ambry.bundle.process import call_interval
    from ambry.orm.exc import NotFoundError
    from ambry.orm import Remote
    from ambry.util.flo import copy_file_or_flo
    # NOTE(review): NamedTemporaryFile appears unused below.
    from tempfile import NamedTemporaryFile
    assert isinstance(remote, Remote)

    # Progress callback, rate-limited to one log line every 5 seconds.
    @call_interval(5)
    def cb(r, total):
        self.logger.info("{}: Downloaded {} bytes".format(ref, total))

    b = None
    try:
        b = self.bundle(ref)
        self.logger.info("{}: Already installed".format(ref))
        vid = b.identity.vid
    except NotFoundError:
        # Not installed: download the bundle database, then check it in.
        self.logger.info("{}: Syncing".format(ref))
        db_dir = self.filesystem.downloads('bundles')
        db_f = os.path.join(db_dir, ref)
        # FIXME. Could get multiple versions of same file. ie vid and vname
        if not os.path.exists(os.path.join(db_dir, db_f)):
            self.logger.info("Downloading bundle '{}' to '{}".format(ref, db_f))
            with open(db_f, 'wb') as f_out:
                with remote.checkout(ref) as f:
                    copy_file_or_flo(f, f_out, cb=cb)
                f_out.flush()
        self.checkin_bundle(db_f)
        b = self.bundle(ref)
        # Should exist now.
        # Record where the bundle came from.
        b.dataset.data['remote_name'] = remote.short_name
        b.dataset.upstream = remote.url
        b.dstate = b.STATES.CHECKEDOUT
        b.commit()
    finally:
        # Close progress tracking even on failure; vid is (re)captured
        # here for the checked-in bundle.
        if b:
            b.progress.close()
            vid = b.identity.vid
    return vid
def is_finite(value: Any) -> bool:
    """Return true if a value is a finite number."""
    if isinstance(value, int):
        # Every Python int (bool included) is finite by definition.
        return True
    return isinstance(value, float) and isfinite(value)
def is_vert_aligned_center(c):
    """Return true if all the components are vertically aligned on their center.

    Vertical alignment means that the bounding boxes of each Mention of c
    shares a similar x-axis value in the visual rendering of the document. In
    this function the similarity of the x-axis value is based on the center of
    their bounding boxes.

    :param c: The candidate to evaluate
    :rtype: boolean
    """
    # Every span must be visually rendered and have its bounding box
    # center-aligned with the first span's box (c[0] is compared
    # against itself, which trivially holds).
    return all([_to_span(c[i]).sentence.is_visual() and bbox_vert_aligned_center(bbox_from_span(_to_span(c[i])), bbox_from_span(_to_span(c[0]))) for i in range(len(c))])
def convert(model, input_features, output_features):
    """Convert a StandardScaler model to the protobuf spec.

    NOTE(review): the previous docstring described an "Imputer", but the
    code below validates the model as a ``StandardScaler``.

    Parameters
    ----------
    model: StandardScaler
        A trained StandardScaler model.
    input_features: str
        Name of the input column.
    output_features: str
        Name of the output column.

    Returns
    -------
    model_spec: An object of type Model_pb.
        Protobuf representation of the model
    """
    if not (_HAS_SKLEARN):
        raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
    # Test the scikit-learn model
    _sklearn_util.check_expected_type(model, StandardScaler)
    _sklearn_util.check_fitted(model, lambda m: hasattr(m, 'mean_'))
    _sklearn_util.check_fitted(model, lambda m: hasattr(m, 'scale_'))
    # Set the interface params.
    spec = _Model_pb2.Model()
    spec.specificationVersion = SPECIFICATION_VERSION
    spec = _set_transform_interface_params(spec, input_features, output_features)
    # Set the parameters: scaling is expressed as (x - mean) / scale,
    # i.e. shift by -mean and multiply by 1/scale.
    tr_spec = spec.scaler
    for x in model.mean_:
        tr_spec.shiftValue.append(-x)
    for x in model.scale_:
        tr_spec.scaleValue.append(1.0 / x)
    return _MLModel(spec)
def p_on_goto(p):
    """statement : ON expr goto label_list"""
    # NOTE: the docstring above is the PLY grammar production for this
    # parser rule -- it is functional, not documentation; do not edit.
    # The selector expression is coerced to an unsigned byte.
    expr = make_typecast(TYPE.ubyte, p[2], p.lineno(1))
    p[0] = make_sentence('ON_' + p[3], expr, *p[4])
def gas_price_strategy_middleware(make_request, web3):
    """Includes a gas price using the gas price strategy.

    Only ``eth_sendTransaction`` requests are touched: when the
    transaction carries no ``gasPrice``, one is generated via
    ``web3.eth.generateGasPrice`` and merged in before forwarding.
    """
    def middleware(method, params):
        if method != 'eth_sendTransaction':
            # Everything else passes straight through.
            return make_request(method, params)
        transaction = params[0]
        if 'gasPrice' not in transaction:
            suggested = web3.eth.generateGasPrice(transaction)
            if suggested is not None:
                transaction = assoc(transaction, 'gasPrice', suggested)
        return make_request(method, [transaction])
    return middleware
def parse_args(args=None):
    """Parse the arguments/options passed to the program on the command line.

    :param args: optional list of argument strings to parse instead of
        ``sys.argv`` -- mainly useful for testing.
    :return: the parsed options namespace, after post-processing (logging
        configured, list-valued options split, path decoded, credentials
        resolved).

    Exits the process when ``--version`` is given, when a class name is
    required but missing, when the cookies file does not exist, or when
    credentials cannot be obtained.
    """
    parse_kwargs = {"description": 'Download Coursera.org lecture material and resources.'}
    # Pick up a per-directory config file, if present, to supply defaults.
    conf_file_path = os.path.join(os.getcwd(), LOCAL_CONF_FILE_NAME)
    if os.path.isfile(conf_file_path):
        parse_kwargs["default_config_files"] = [conf_file_path]
    # NOTE(review): `ArgParser` suggests `argparse` is the configargparse
    # module imported under that name -- confirm at the file's imports.
    parser = argparse.ArgParser(**parse_kwargs)

    # Basic options
    group_basic = parser.add_argument_group('Basic options')
    group_basic.add_argument('class_names',
                             action='store',
                             nargs='*',
                             help='name(s) of the class(es) (e.g. "ml-005")')
    group_basic.add_argument('-u', '--username',
                             dest='username',
                             action='store',
                             default=None,
                             help='username (email) that you use to login to Coursera')
    group_basic.add_argument('-p', '--password',
                             dest='password',
                             action='store',
                             default=None,
                             help='coursera password')
    group_basic.add_argument('--jobs',
                             dest='jobs',
                             action='store',
                             default=1,
                             type=int,
                             help='number of parallel jobs to use for '
                             'downloading resources. (Default: 1)')
    group_basic.add_argument('--download-delay',
                             dest='download_delay',
                             action='store',
                             default=60,
                             type=int,
                             help='number of seconds to wait before downloading '
                             'next course. (Default: 60)')
    group_basic.add_argument('-b',  # FIXME: kill this one-letter option
                             '--preview',
                             dest='preview',
                             action='store_true',
                             default=False,
                             help='get videos from preview pages. (Default: False)')
    group_basic.add_argument('--path',
                             dest='path',
                             action='store',
                             default='',
                             help='path to where to save the file. (Default: current directory)')
    group_basic.add_argument('-sl',  # FIXME: deprecate this option
                             '--subtitle-language',
                             dest='subtitle_language',
                             action='store',
                             default='all',
                             help='Choose language to download subtitles and transcripts.'
                             '(Default: all) Use special value "all" to download all available.'
                             'To download subtitles and transcripts of multiple languages,'
                             'use comma(s) (without spaces) to seperate the names of the languages,'
                             ' i.e., "en,zh-CN".'
                             'To download subtitles and transcripts of alternative language(s) '
                             'if only the current language is not available,'
                             'put an "|<lang>" for each of the alternative languages after '
                             'the current language, i.e., "en|fr,zh-CN|zh-TW|de", and make sure '
                             'the parameter are wrapped with quotes when "|" presents.')

    # Selection of material to download
    group_material = parser.add_argument_group('Selection of material to download')
    group_material.add_argument('--specialization',
                                dest='specialization',
                                action='store_true',
                                default=False,
                                help='treat given class names as specialization names and try to '
                                'download its courses, if available. Note that there are name '
                                'clashes, e.g. "machine-learning" is both a course and a '
                                'specialization (Default: False)')
    group_material.add_argument('--only-syllabus',
                                dest='only_syllabus',
                                action='store_true',
                                default=False,
                                help='download only syllabus, skip course content. '
                                '(Default: False)')
    group_material.add_argument('--download-quizzes',
                                dest='download_quizzes',
                                action='store_true',
                                default=False,
                                help='download quiz and exam questions. (Default: False)')
    group_material.add_argument('--download-notebooks',
                                dest='download_notebooks',
                                action='store_true',
                                default=False,
                                help='download Python Jupyther Notebooks. (Default: False)')
    group_material.add_argument('--about',  # FIXME: should be --about-course
                                dest='about',
                                action='store_true',
                                default=False,
                                help='download "about" metadata. (Default: False)')
    group_material.add_argument('-f', '--formats',
                                dest='file_formats',
                                action='store',
                                default='all',
                                help='file format extensions to be downloaded in'
                                ' quotes space separated, e.g. "mp4 pdf" '
                                '(default: special value "all")')
    group_material.add_argument('--ignore-formats',
                                dest='ignore_formats',
                                action='store',
                                default=None,
                                help='file format extensions of resources to ignore'
                                ' (default: None)')
    group_material.add_argument('-sf',  # FIXME: deprecate this option
                                '--section_filter',
                                dest='section_filter',
                                action='store',
                                default=None,
                                help='only download sections which contain this'
                                ' regex (default: disabled)')
    group_material.add_argument('-lf',  # FIXME: deprecate this option
                                '--lecture_filter',
                                dest='lecture_filter',
                                action='store',
                                default=None,
                                help='only download lectures which contain this regex'
                                ' (default: disabled)')
    group_material.add_argument('-rf',  # FIXME: deprecate this option
                                '--resource_filter',
                                dest='resource_filter',
                                action='store',
                                default=None,
                                help='only download resources which match this regex'
                                ' (default: disabled)')
    group_material.add_argument('--video-resolution',
                                dest='video_resolution',
                                action='store',
                                default='540p',
                                help='video resolution to download (default: 540p); '
                                'only valid for on-demand courses; '
                                'only values allowed: 360p, 540p, 720p')
    group_material.add_argument('--disable-url-skipping',
                                dest='disable_url_skipping',
                                action='store_true',
                                default=False,
                                help='disable URL skipping, all URLs will be '
                                'downloaded (default: False)')

    # Parameters related to external downloaders
    group_external_dl = parser.add_argument_group('External downloaders')
    group_external_dl.add_argument('--wget',
                                   dest='wget',
                                   action='store',
                                   nargs='?',
                                   const='wget',
                                   default=None,
                                   help='use wget for downloading,'
                                   'optionally specify wget bin')
    group_external_dl.add_argument('--curl',
                                   dest='curl',
                                   action='store',
                                   nargs='?',
                                   const='curl',
                                   default=None,
                                   help='use curl for downloading,'
                                   ' optionally specify curl bin')
    group_external_dl.add_argument('--aria2',
                                   dest='aria2',
                                   action='store',
                                   nargs='?',
                                   const='aria2c',
                                   default=None,
                                   help='use aria2 for downloading,'
                                   ' optionally specify aria2 bin')
    group_external_dl.add_argument('--axel',
                                   dest='axel',
                                   action='store',
                                   nargs='?',
                                   const='axel',
                                   default=None,
                                   help='use axel for downloading,'
                                   ' optionally specify axel bin')
    group_external_dl.add_argument('--downloader-arguments',
                                   dest='downloader_arguments',
                                   default='',
                                   help='additional arguments passed to the'
                                   ' downloader')

    parser.add_argument('--list-courses',
                        dest='list_courses',
                        action='store_true',
                        default=False,
                        help='list course names (slugs) and quit. Listed '
                        'course names can be put into program arguments')
    parser.add_argument('--resume',
                        dest='resume',
                        action='store_true',
                        default=False,
                        help='resume incomplete downloads (default: False)')
    parser.add_argument('-o', '--overwrite',
                        dest='overwrite',
                        action='store_true',
                        default=False,
                        help='whether existing files should be overwritten'
                        ' (default: False)')
    parser.add_argument('--verbose-dirs',
                        dest='verbose_dirs',
                        action='store_true',
                        default=False,
                        help='include class name in section directory name')
    parser.add_argument('--quiet',
                        dest='quiet',
                        action='store_true',
                        default=False,
                        help='omit as many messages as possible'
                        ' (only printing errors)')
    parser.add_argument('-r', '--reverse',
                        dest='reverse',
                        action='store_true',
                        default=False,
                        help='download sections in reverse order')
    parser.add_argument('--combined-section-lectures-nums',
                        dest='combined_section_lectures_nums',
                        action='store_true',
                        default=False,
                        help='include lecture and section name in final files')
    parser.add_argument('--unrestricted-filenames',
                        dest='unrestricted_filenames',
                        action='store_true',
                        default=False,
                        help='Do not limit filenames to be ASCII-only')

    # Advanced authentication
    group_adv_auth = parser.add_argument_group('Advanced authentication options')
    group_adv_auth.add_argument('-c', '--cookies_file',
                                dest='cookies_file',
                                action='store',
                                default=None,
                                help='full path to the cookies.txt file')
    group_adv_auth.add_argument('-n', '--netrc',
                                dest='netrc',
                                nargs='?',
                                action='store',
                                const=True,
                                default=False,
                                help='use netrc for reading passwords, uses default'
                                ' location if no path specified')
    group_adv_auth.add_argument('-k', '--keyring',
                                dest='use_keyring',
                                action='store_true',
                                default=False,
                                help='use keyring provided by operating system to '
                                'save and load credentials')
    group_adv_auth.add_argument('--clear-cache',
                                dest='clear_cache',
                                action='store_true',
                                default=False,
                                help='clear cached cookies')

    # Advanced miscellaneous options
    group_adv_misc = parser.add_argument_group('Advanced miscellaneous options')
    group_adv_misc.add_argument('--hook',
                                dest='hooks',
                                action='append',
                                default=[],
                                help='hooks to run when finished')
    group_adv_misc.add_argument('-pl', '--playlist',
                                dest='playlist',
                                action='store_true',
                                default=False,
                                help='generate M3U playlists for course weeks')
    group_adv_misc.add_argument('--mathjax-cdn',
                                dest='mathjax_cdn_url',
                                default='https://cdn.mathjax.org/mathjax/latest/MathJax.js',
                                help='the cdn address of MathJax.js')

    # Debug options
    group_debug = parser.add_argument_group('Debugging options')
    group_debug.add_argument('--skip-download',
                             dest='skip_download',
                             action='store_true',
                             default=False,
                             help='for debugging: skip actual downloading of files')
    group_debug.add_argument('--debug',
                             dest='debug',
                             action='store_true',
                             default=False,
                             help='print lots of debug information')
    group_debug.add_argument('--cache-syllabus',
                             dest='cache_syllabus',
                             action='store_true',
                             default=False,
                             help='cache course syllabus into a file')
    group_debug.add_argument('--version',
                             dest='version',
                             action='store_true',
                             default=False,
                             help='display version and exit')
    group_debug.add_argument('-l',  # FIXME: remove short option from rarely used ones
                             '--process_local_page',
                             dest='local_page',
                             help='uses or creates local cached version of syllabus'
                             ' page')

    # Final parsing of the options
    args = parser.parse_args(args)

    # Initialize the logging system first so that other functions
    # can use it right away
    if args.debug:
        logging.basicConfig(level=logging.DEBUG,
                            format='%(name)s[%(funcName)s] %(message)s')
    elif args.quiet:
        logging.basicConfig(level=logging.ERROR,
                            format='%(name)s: %(message)s')
    else:
        logging.basicConfig(level=logging.INFO,
                            format='%(message)s')

    # Class names are only mandatory for commands that act on courses.
    if class_name_arg_required(args) and not args.class_names:
        parser.print_usage()
        logging.error('You must supply at least one class name')
        sys.exit(1)

    # show version?
    if args.version:
        # we use print (not logging) function because version may be used
        # by some external script while logging may output excessive
        # information
        print(__version__)
        sys.exit(0)

    # turn list of strings into list
    args.downloader_arguments = args.downloader_arguments.split()

    # turn list of strings into list
    args.file_formats = args.file_formats.split()

    # decode path so we can work properly with cyrillic symbols on different
    # versions on Python
    args.path = decode_input(args.path)

    # check arguments: keyring conflicts with an explicit password, and is
    # only usable when the `keyring` module imported successfully.
    if args.use_keyring and args.password:
        logging.warning('--keyring and --password cannot be specified together')
        args.use_keyring = False

    if args.use_keyring and not keyring:
        logging.warning('The python module `keyring` not found.')
        args.use_keyring = False

    if args.cookies_file and not os.path.exists(args.cookies_file):
        logging.error('Cookies file not found: %s', args.cookies_file)
        sys.exit(1)

    # Without a cookies file we need real credentials to log in.
    if not args.cookies_file:
        try:
            args.username, args.password = get_credentials(
                username=args.username, password=args.password,
                netrc=args.netrc, use_keyring=args.use_keyring)
        except CredentialsError as e:
            logging.error(e)
            sys.exit(1)

    return args
def get_root_banks(self):
    """Return the root banks of this bank hierarchy.

    return: (osid.assessment.BankList) - the root banks
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure occurred
    *compliance: mandatory -- This method must be implemented.*
    """
    # Mirrors the osid.resource.BinHierarchySession.get_root_bins template.
    session = self._catalog_session
    if session is not None:
        return session.get_root_catalogs()
    root_ids = list(self.get_root_bank_ids())
    lookup = BankLookupSession(self._proxy, self._runtime)
    return lookup.get_banks_by_ids(root_ids)
def get_file_language(filename, text=None):
    """Return the language of *filename*: its extension if there is one,
    otherwise 'python' when a python shebang is found, else ''."""
    extension = osp.splitext(filename)[1]
    if extension.startswith('.'):
        # Drop the leading dot so the extension doubles as the language name.
        extension = extension[1:]
    language = extension
    if not extension:
        # No extension: sniff for a python shebang among the leading lines.
        if text is None:
            text, _enc = encoding.read(filename)
        for line in text.splitlines():
            if not line.strip():
                continue
            if not line.startswith('#!'):
                break
            if 'python' in line[2:]:
                language = 'python'
    return language
def token(cls: Type[ConditionType], left: Any, op: Optional[Any] = None, right: Optional[Any] = None) -> ConditionType:
    """Build a Condition instance from its parts.

    :param left: Left argument (always stored)
    :param op: Operator (stored only when truthy)
    :param right: Right argument (stored only when truthy)
    :return: the populated condition instance
    """
    instance = cls()
    instance.left = left
    # Optional parts are attached only when they are truthy, matching the
    # original short-circuit assignments.
    for attr, value in (('op', op), ('right', right)):
        if value:
            setattr(instance, attr, value)
    return instance
async def _async_connect(self):  # pragma: no cover
    """connect and authenticate to the XMPP server. Async mode.

    Enters the client's connection context manager by hand (``__aenter__``)
    so the resulting stream can be stored on ``self`` and closed later;
    the matching ``__aexit__`` is presumably invoked elsewhere on
    disconnect -- confirm.

    Raises:
        AuthenticationFailure: when SASL authentication is rejected
            (bad credentials or unregistered user).
    """
    try:
        self.conn_coro = self.client.connected()
        aenter = type(self.conn_coro).__aenter__(self.conn_coro)
        self.stream = await aenter
        logger.info(f"Agent {str(self.jid)} connected and authenticated.")
    except aiosasl.AuthenticationFailure:
        # Re-raise as the project's own exception type with guidance.
        raise AuthenticationFailure(
            "Could not authenticate the agent. Check user and password or use auto_register=True")
def single(self, trigger_id, full=False):
    """Get an existing (full) trigger definition.

    :param trigger_id: Trigger definition id to be retrieved.
    :param full: Fetch the full definition, default is False.
    :return: Trigger or FullTrigger depending on the full parameter value.
    """
    # The full definition lives under a slightly different service path
    # and is wrapped in a richer type.
    if full:
        path = ['triggers', 'trigger', trigger_id]
        wrapper = FullTrigger
    else:
        path = ['triggers', trigger_id]
        wrapper = Trigger
    return wrapper(self._get(self._service_url(path)))
def status(self, **kwargs):
    """Retrieve the status of a service `name` or all services
    for the current init system.

    NOTE(review): as visible here this only (re)initializes
    ``self.services`` with the init-system name and an empty service list;
    the per-service status collection implied by the docstring is not in
    this body -- confirm it is performed elsewhere (e.g. by a subclass).
    """
    self.services = dict(init_system=self.init_system, services=[])
def register_simulants(self, simulants: pd.DataFrame):
    """Add new simulants to the randomness mapping.

    Parameters
    ----------
    simulants
        A table with state data representing the new simulants. Each
        simulant should pass through this function exactly once.

    Raises
    ------
    RandomnessError
        If the provided table does not contain all key columns specified
        in the configuration.
    """
    missing = [key for key in self._key_columns if key not in simulants.columns]
    if missing:
        raise RandomnessError("The simulants dataframe does not have all specified key_columns.")
    # Index the new rows by the configured key columns and merge them
    # into the existing mapping.
    new_keys = simulants.set_index(self._key_columns).index
    self._key_mapping.update(new_keys)
def values(self):
    """Return the themeables sorted by depth in the inheritance
    hierarchy, deepest first.

    Sorting (descending) on MRO length means e.g. :class:`axis_line_x`
    precedes :class:`axis_line` in the returned list, so applying and
    merging the themeables does not clash.
    """
    def mro_depth(themeable):
        # Number of classes in the MRO == depth in the hierarchy.
        return len(themeable.__class__.__mro__)

    return sorted(dict.values(self), key=mro_depth, reverse=True)
def metas(self, prefix=None, limit=None, delimiter=None):
    """RETURN THE METADATA DESCRIPTORS FOR EACH KEY

    NOTE(review): calling this with the default ``prefix=None`` raises a
    TypeError at ``len(prefix)`` -- confirm whether callers always pass a
    prefix.
    NOTE(review): the loop appends before testing ``i >= limit``, so up to
    ``limit + 1`` descriptors can be returned -- confirm whether that is
    intentional.
    """
    limit = coalesce(limit, TOO_MANY_KEYS)
    keys = self.bucket.list(prefix=prefix, delimiter=delimiter)
    prefix_len = len(prefix)
    output = []
    # Keep only keys that are exactly the prefix, or continue it with a
    # "." or ":" separator.
    for i, k in enumerate(k for k in keys if len(k.key) == prefix_len or k.key[prefix_len] in [".", ":"]):
        output.append({
            "key": strip_extension(k.key),
            "etag": convert.quote2string(k.etag),
            "expiry_date": Date(k.expiry_date),
            "last_modified": Date(k.last_modified)
        })
        if i >= limit:
            break
    return wrap(output)
def next(self):
    """generator function to yield multipart/form-data representation
    of parameters

    Python 2-style iterator step (note ``.next()`` and ``raise
    StopIteration``): drains the current parameter's encoder, then moves
    on to the next parameter, and finally emits the closing boundary.
    ``self.cb``, when set, is a progress callback invoked as
    ``cb(current_param, bytes_so_far, total)``.
    """
    # Phase 1: if a parameter is being encoded, keep yielding its blocks.
    if self.param_iter is not None:
        try:
            block = self.param_iter.next()
            self.current += len(block)
            if self.cb:
                self.cb(self.p, self.current, self.total)
            return block
        except StopIteration:
            # Current parameter exhausted; fall through to advance.
            self.p = None
            self.param_iter = None
    # Phase 3 (terminal): self.i was cleared after the closing boundary,
    # so a further call ends the iteration.
    if self.i is None:
        raise StopIteration
    elif self.i >= len(self.params):
        # Phase 2b: all parameters done -- emit the closing boundary once
        # and clear the cursor so the next call raises StopIteration.
        self.param_iter = None
        self.p = None
        self.i = None
        block = "--%s--\r\n" % self.boundary
        self.current += len(block)
        if self.cb:
            self.cb(self.p, self.current, self.total)
        return block
    # Phase 2a: start encoding the next parameter and recurse to yield
    # its first block.
    self.p = self.params[self.i]
    self.param_iter = self.p.iter_encode(self.boundary)
    self.i += 1
    return self.next()
def defrise(space, nellipses=8, alternating=False, min_pt=None, max_pt=None):
    """Phantom with regularly spaced ellipses, commonly used to verify
    cone-beam algorithms.

    Parameters
    ----------
    space : `DiscreteLp`
        Space in which the phantom should be created; must be 2- or
        3-dimensional.
    nellipses : int, optional
        Number of ellipses. More ellipses means each ellipse is thinner.
    alternating : bool, optional
        If True the ellipses get alternating densities (+1, -1);
        otherwise all ellipses have value +1.
    min_pt, max_pt : array-like, optional
        Optional bounding-box override for the phantom. Providing one of
        them shifts the phantom; providing both scales it. Both must stay
        within ``space.min_pt``/``space.max_pt``.

    Returns
    -------
    phantom : ``space`` element
        The generated phantom in ``space``.

    See Also
    --------
    odl.phantom.transmission.shepp_logan
    """
    # Build the ellipse descriptors for the space's dimensionality, then
    # rasterize them into the space.
    geometry = defrise_ellipses(space.ndim,
                                nellipses=nellipses,
                                alternating=alternating)
    return ellipsoid_phantom(space, geometry, min_pt, max_pt)
def parse(self, text):
    """Parse *text* (decoded as UTF-8) and return the result.

    Fails via ``self._fail`` when non-whitespace content remains after
    the parsed portion.
    """
    decoded = tounicode(text, encoding="utf-8")
    result, pos = self._parse(decoded, 0)
    trailing = decoded[pos:]
    if trailing.strip():
        self._fail("Unexpected trailing content", decoded, pos)
    return result
def _validate_partition_boundary(boundary):
    '''Ensure valid partition boundaries are supplied.'''
    text = six.text_type(boundary)
    # A boundary is digits/dots optionally followed by a known unit suffix.
    match = re.search(r'^([\d.]+)(\D*)$', text)
    if match is None:
        raise CommandExecutionError(
            'Invalid partition boundary passed: "{0}"'.format(text))
    unit = match.group(2)
    if unit and unit not in VALID_UNITS:
        raise CommandExecutionError(
            'Invalid partition boundary passed: "{0}"'.format(text))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.