signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def _build_first_tree(self):
    """Build the first tree joining all ``n_nodes`` variables with n - 1 edges.

    Runs Prim's algorithm over the negated absolute Kendall's tau matrix,
    so each step greedily attaches the unvisited node whose connecting
    edge has the largest ``|tau|``.  A bivariate copula is selected for
    every edge added, and the edge is appended to ``self.edges``.
    """
    # Minimising the negated |tau| is equivalent to maximising |tau|.
    neg_tau = -1.0 * abs(self.tau_matrix)
    visited = {0}
    while len(visited) != self.n_nodes:
        # Collect candidate edges from the current tree to unvisited nodes.
        candidates = set()
        for src in visited:
            for dst in range(self.n_nodes):
                if dst not in visited and dst != src:
                    candidates.add((src, dst))
        # Edge with maximum |tau| (minimum negated |tau|).
        best = min(candidates, key=lambda e: neg_tau[e[0]][e[1]])
        name, theta = Bivariate.select_copula(self.u_matrix[:, (best[0], best[1])])
        left, right = sorted([best[0], best[1]])
        new_edge = Edge(len(visited) - 1, left, right, name, theta)
        new_edge.tau = self.tau_matrix[best[0], best[1]]
        self.edges.append(new_edge)
        visited.add(best[1])
|
def _list_vlans_by_name ( self , name ) :
"""Returns a list of IDs of VLANs which match the given VLAN name .
: param string name : a VLAN name
: returns : List of matching IDs"""
|
results = self . list_vlans ( name = name , mask = 'id' )
return [ result [ 'id' ] for result in results ]
|
def get_iex_dividends(start=None, **kwargs):
    """Deprecated alias — MOVED to ``iexfinance.refdata.get_iex_dividends``.

    Emits a deprecation warning, then delegates to the ``Dividends``
    fetcher with the given arguments.
    """
    import warnings
    warnings.warn(WNG_MSG % ("get_iex_dividends", "refdata.get_iex_dividends"))
    fetcher = Dividends(start=start, **kwargs)
    return fetcher.fetch()
|
def expect_column_values_to_be_dateutil_parseable(self, column, mostly=None, result_format=None, include_config=False, catch_exceptions=None, meta=None):
    """Expect column entries to be parseable using dateutil.

    expect_column_values_to_be_dateutil_parseable is a
    :func:`column_map_expectation <great_expectations.data_asset.dataset.Dataset.column_map_expectation>`.

    Args:
        column (str): The column name.

    Keyword Args:
        mostly (None or a float between 0 and 1): Return `"success": True` if
            at least mostly percent of values match the expectation. For more
            detail, see :ref:`mostly`.

    Other Parameters:
        result_format (str or None): Which output mode to use: `BOOLEAN_ONLY`,
            `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): If True, then include the expectation config
            as part of the result object. For more detail, see
            :ref:`include_config`.
        catch_exceptions (boolean or None): If True, then catch exceptions and
            include them as part of the result object. For more detail, see
            :ref:`catch_exceptions`.
        meta (dict or None): A JSON-serializable dictionary (nesting allowed)
            that will be included in the output without modification. For more
            detail, see :ref:`meta`.

    Returns:
        A JSON-serializable expectation result object.

        Exact fields vary depending on the values passed to
        :ref:`result_format <result_format>` and :ref:`include_config`,
        :ref:`catch_exceptions`, and :ref:`meta`.
    """
    # Abstract here; concrete dataset backends provide the implementation.
    raise NotImplementedError
|
def run_with_pmids(model_path, pmids):
    """Run the machine at ``model_path`` over the given list of PMIDs.

    Thin wrapper delegating to
    ``indra.tools.machine.machine.run_with_pmids_helper``.
    """
    from indra.tools.machine import machine
    machine.run_with_pmids_helper(model_path, pmids)
|
def sizeof_fmt(num):
    """Turn a number of bytes into a human-readable string.

    Parameters
    ----------
    num : int
        The number of bytes.

    Returns
    -------
    size : str
        The size in human-readable format (e.g. ``'1.0 MB'``).
    """
    if num > 1:
        units = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB']
        decimals = [0, 0, 1, 2, 2, 2]
        # Largest unit that keeps the quotient at least 1.
        exponent = min(int(log(num, 1024)), len(units) - 1)
        quotient = float(num) / 1024 ** exponent
        # Each unit carries its own number of decimal places.
        format_string = '{0:.%sf} {1}' % (decimals[exponent])
        return format_string.format(quotient, units[exponent])
    if num == 0:
        return '0 bytes'
    if num == 1:
        return '1 byte'
|
def static_data(fn):
    """Serve a static file from the ``static`` directory.

    :param fn: requested file path, relative to ``STATIC_PATH``.
    :returns: the static file response.

    Aborts with 404 when the path does not exist or would escape the
    static directory.
    """
    file_path = os.path.normpath(fn)
    # os.path.normpath alone does not stop path traversal: a leading
    # ".." or an absolute path would escape STATIC_PATH once joined.
    # (static_file may re-validate internally — this keeps the handler
    # safe regardless.)  Report it as "not found" to avoid leaking info.
    if os.path.isabs(file_path) or file_path.startswith(".."):
        abort(404, "Soubor '%s' neexistuje!" % fn)
    full_path = os.path.join(STATIC_PATH, file_path)
    if not os.path.exists(full_path):
        abort(404, "Soubor '%s' neexistuje!" % fn)
    return static_file(file_path, STATIC_PATH)
|
def enq(self, task):
    """Enqueue a fetch task to the pool of workers.

    :param task: the Task object
    :type task: Task or PutTask
    :raises RuntimeError: if the pool is stopped or stopping.
    """
    # Refuse new work once shutdown has been signalled.
    if self._stop.is_set():
        raise RuntimeError("Attempted to enqueue an operation while multi pool was shutdown!")
    self._inq.put(task)
|
def next(self):
    """Set the audio action to advance to the next source.

    Marks the session as ended, sets the audio interface to ``'next'``
    with an empty source list, and returns ``self`` for chaining.
    """
    response = self._response
    response['shouldEndSession'] = True
    audio = response['action']['audio']
    audio['interface'] = 'next'
    audio['sources'] = []
    return self
|
def wait(self, timeout=None):
    """Block until the client loop stops or ``timeout`` elapses.

    Returns True when the loop has stopped, False on timeout.
    """
    stopped = self.__stopped
    stopped.wait(timeout)
    return stopped.is_set()
|
def get_network_instances(self, name=""):
    """get_network_instances implementation for EOS.

    Builds a mapping of VRF name -> network instance dict (name, type,
    route-distinguisher state and member interfaces).  Interfaces that
    belong to no VRF are reported under a synthetic "default" instance.

    :param name: if given, return only the instance with that name
        (empty dict when it does not exist); otherwise return all.
    """
    output = self._show_vrf()
    vrfs = {}
    # Every interface claimed by some VRF, so "default" can hold the rest.
    all_vrf_interfaces = {}
    for vrf in output:
        # Normalize the "unset" route-distinguisher markers to "".
        if (vrf.get("route_distinguisher", "") == "<not set>" or vrf.get("route_distinguisher", "") == "None"):
            vrf["route_distinguisher"] = ""
        else:
            vrf["route_distinguisher"] = py23_compat.text_type(vrf["route_distinguisher"])
        interfaces = {}
        # Interface lists arrive as comma-separated strings.
        for interface_raw in vrf.get("interfaces", []):
            interface = interface_raw.split(",")
            for line in interface:
                if line.strip() != "":
                    interfaces[py23_compat.text_type(line.strip())] = {}
                    all_vrf_interfaces[py23_compat.text_type(line.strip())] = {}
        vrfs[py23_compat.text_type(vrf["name"])] = {"name": py23_compat.text_type(vrf["name"]), "type": "L3VRF", "state": {"route_distinguisher": vrf["route_distinguisher"]}, "interfaces": {"interface": interfaces}, }
    all_interfaces = self.get_interfaces_ip().keys()
    # Interfaces assigned to no VRF make up the default instance.
    vrfs["default"] = {"name": "default", "type": "DEFAULT_INSTANCE", "state": {"route_distinguisher": ""}, "interfaces": {"interface": {k: {} for k in all_interfaces if k not in all_vrf_interfaces.keys()}}, }
    if name:
        if name in vrfs:
            return {py23_compat.text_type(name): vrfs[name]}
        return {}
    else:
        return vrfs
|
def in_hours(self, office=None, when=None):
    """Find whether it is business hours in the given office.

    :param office: Office ID to look up, or None to check if any office
        is in business hours.
    :type office: str or None
    :param datetime.datetime when: When to check the office is open, or
        None for now.
    :returns: True if it is business hours, False otherwise.
    :rtype: bool
    :raises KeyError: If the office is unknown.
    """
    # Fixes: identity comparison with None (was `== None`), Python-2-only
    # `itervalues()`, and a loop variable that shadowed the parameter.
    if when is None:
        when = datetime.now(tz=utc)
    if office is None:
        # Open if *any* office is currently within business hours.
        return any(candidate.in_hours(when) for candidate in self.offices.values())
    # Check the specific office; raises KeyError when unknown.
    return self.offices[office].in_hours(when)
|
def save_report(self, name, address=True):
    """Save the Compare report in .comp (flat file) format.

    :param name: filename (without the ``.comp`` extension)
    :type name: str
    :param address: flag for returning the absolute file path
    :type address: bool
    :return: saving status as dict {"Status": bool, "Message": str}
    """
    try:
        message = None
        report = compare_report_print(self.sorted, self.scores, self.best_name)
        # `with` guarantees the handle is closed even if the write fails
        # (the original leaked the handle on a write error); also avoids
        # shadowing the builtin `file`.
        with open(name + ".comp", "w") as report_file:
            report_file.write(report)
        if address:
            message = os.path.join(os.getcwd(), name + ".comp")
        return {"Status": True, "Message": message}
    except Exception as e:
        return {"Status": False, "Message": str(e)}
|
def to_bool(self, value):
    """Convert a sheet cell value to a boolean.

    The string ``'true'`` (any case, str or bytes) and the integer 1
    (including True and ``'1'``) are truthy; anything else is False.
    The lenient try/except chain exists because sheet values may arrive
    as text, bytes or numbers.
    """
    try:
        value = value.lower()
    except Exception:
        pass
    try:
        # Normalize bytes to text.  The original called .encode('utf-8'),
        # which on Python 3 turned 'true' into b'true' and made the final
        # comparison always fail; decoding works on both Python 2 and 3.
        value = value.decode('utf-8')
    except Exception:
        pass
    try:
        value = int(value)
    except Exception:
        pass
    return value in ('true', 1)
|
def get_orthogonal_selection(self, selection, out=None, fields=None):
    """Retrieve data by making an independent selection per dimension.

    Each entry of ``selection`` may be an integer (indexing a single
    item), a slice, an array of integers, or a Boolean array where True
    values indicate a selection; the result combines the per-dimension
    selections (outer indexing).  The same functionality is available
    via the ``oindex`` property, e.g. ``z.oindex[[1, 4], :]``.

    Parameters
    ----------
    selection : tuple
        A selection for each dimension of the array. May be any
        combination of int, slice, integer array or Boolean array.
    out : ndarray, optional
        If given, load the selected data directly into this array.
    fields : str or sequence of str, optional
        For arrays with a structured dtype, one or more fields can be
        specified to extract data for.

    Returns
    -------
    out : ndarray
        A NumPy array containing the data for the requested selection.

    Notes
    -----
    Orthogonal indexing is also known as outer indexing.  Slices with
    step > 1 are supported, but slices with negative step are not.

    See Also
    --------
    get_basic_selection, set_basic_selection, get_mask_selection,
    set_mask_selection, get_coordinate_selection,
    set_coordinate_selection, set_orthogonal_selection, vindex, oindex,
    __getitem__, __setitem__
    """
    # Refresh metadata unless it is being cached.
    if not self._cache_metadata:
        self._load_metadata()
    # Validate requested fields against the array dtype.
    check_fields(fields, self._dtype)
    # Delegate the actual work to the orthogonal indexer.
    indexer = OrthogonalIndexer(selection, self)
    return self._get_selection(indexer=indexer, out=out, fields=fields)
|
def update_firmware(node):
    """Perform a SUM based firmware update on the node.

    Mounts the SPP ISO attached through iLO virtual media and runs the
    SUM (or legacy HPSUM) executable against all or selected firmware
    components.

    :param node: A node object of type dict.
    :returns: Operation Status string.
    :raises: SUMOperationError, when the vmedia device is not found or
        when the mount operation fails or when the image validation fails.
    :raises: IloConnectionError, when the iLO connection fails.
    :raises: IloError, when vmedia eject or insert operation fails.
    """
    sum_update_iso = node['clean_step']['args'].get('url')
    # Validates the http image reference for SUM update ISO.
    try:
        utils.validate_href(sum_update_iso)
    except exception.ImageRefValidationFailed as e:
        raise exception.SUMOperationError(reason=e)
    # Ejects the CDROM device in the iLO and inserts the SUM update ISO
    # to the CDROM device.
    info = node.get('driver_info')
    ilo_object = client.IloClient(info.get('ilo_address'), info.get('ilo_username'), info.get('ilo_password'))
    ilo_object.eject_virtual_media('CDROM')
    ilo_object.insert_virtual_media(sum_update_iso, 'CDROM')
    # Waits for the OS to detect the disk and update the label file. SPP
    # ISO is identified by matching its label.
    time.sleep(WAIT_TIME_DISK_LABEL_TO_BE_VISIBLE)
    vmedia_device_dir = "/dev/disk/by-label/"
    # Bug fix: the original left `vmedia_device_file` unbound (NameError)
    # when no SPP* label was present; initialize it and stop at the first
    # match so the missing-device case raises SUMOperationError as intended.
    vmedia_device_file = None
    for entry in os.listdir(vmedia_device_dir):
        if fnmatch.fnmatch(entry, 'SPP*'):
            vmedia_device_file = os.path.join(vmedia_device_dir, entry)
            break
    if vmedia_device_file is None or not os.path.exists(vmedia_device_file):
        msg = "Unable to find the virtual media device for SUM"
        raise exception.SUMOperationError(reason=msg)
    # Validates the SPP ISO image for any file corruption using the
    # checksum of the ISO file.
    expected_checksum = node['clean_step']['args'].get('checksum')
    try:
        utils.verify_image_checksum(vmedia_device_file, expected_checksum)
    except exception.ImageRefValidationFailed as e:
        raise exception.SUMOperationError(reason=e)
    # Mounts SPP ISO on a temporary directory.
    vmedia_mount_point = tempfile.mkdtemp()
    try:
        try:
            processutils.execute("mount", vmedia_device_file, vmedia_mount_point)
        except processutils.ProcessExecutionError as e:
            msg = ("Unable to mount virtual media device %(device)s: " "%(error)s" % {'device': vmedia_device_file, 'error': e})
            raise exception.SUMOperationError(reason=msg)
        # Executes the SUM based firmware update by passing the
        # 'smartupdate' executable path if it exists, else the 'hpsum'
        # executable path, and the components specified (if any).
        sum_file_path = os.path.join(vmedia_mount_point, SUM_LOCATION)
        if not os.path.exists(sum_file_path):
            sum_file_path = os.path.join(vmedia_mount_point, HPSUM_LOCATION)
        components = node['clean_step']['args'].get('components')
        result = _execute_sum(sum_file_path, vmedia_mount_point, components=components)
        processutils.trycmd("umount", vmedia_mount_point)
    finally:
        shutil.rmtree(vmedia_mount_point, ignore_errors=True)
    return result
|
def text_pb(tag, data, description=None):
    """Create a text tf.Summary protobuf.

    Arguments:
      tag: String tag for the summary.
      data: A Python bytestring (of type bytes), a Unicode string, or a
        numpy data array of those types.
      description: Optional long-form description for this summary, as a
        `str`. Markdown is supported. Defaults to empty.
    Raises:
      TypeError: If the type of the data is unsupported.
    Returns:
      A `tf.Summary` protobuf object.
    """
    try:
        # `np.object` was an alias of the builtin `object` and was removed
        # in NumPy 1.24; use `object` directly so this works on all
        # NumPy versions.
        tensor = tensor_util.make_tensor_proto(data, dtype=object)
    except TypeError as e:
        raise TypeError('tensor must be of type string', e)
    summary_metadata = metadata.create_summary_metadata(display_name=None, description=description)
    summary = summary_pb2.Summary()
    summary.value.add(tag=tag, metadata=summary_metadata, tensor=tensor)
    return summary
|
def additions_install(**kwargs):
    '''Install VirtualBox Guest Additions using the CD connected by VirtualBox.

    To connect the VirtualBox Guest Additions CD via the VirtualBox
    graphical interface press 'Host + D' ('Host' is usually 'Right Ctrl').
    See https://www.virtualbox.org/manual/ch04.html#idp52733088 for more details.

    CLI Example:

    .. code-block:: bash

        salt '*' vbox_guest.additions_install
        salt '*' vbox_guest.additions_install reboot=True
        salt '*' vbox_guest.additions_install upgrade_os=True

    :param reboot: reboot computer to complete installation
    :type reboot: bool
    :param upgrade_os: upgrade OS (to ensure the latests version of kernel and developer tools are installed)
    :type upgrade_os: bool
    :return: version of VirtualBox Guest Additions or string with error
    '''
    with _additions_mounted() as mount_point:
        # Only Linux guests are handled here; any other kernel value
        # falls through (returning None).
        if __grains__.get('kernel', '') == 'Linux':
            return _additions_install_linux(mount_point, **kwargs)
|
def _ParseTimestamp ( self , parser_mediator , row ) :
"""Provides a timestamp for the given row .
If the Trend Micro log comes from a version that provides a POSIX timestamp ,
use that directly ; it provides the advantages of UTC and of second
precision . Otherwise fall back onto the local - timezone date and time .
Args :
parser _ mediator ( ParserMediator ) : mediates interactions between parsers
and other components , such as storage and dfvfs .
row ( dict [ str , str ] ) : fields of a single row , as specified in COLUMNS .
Returns :
dfdatetime . interface . DateTimeValue : date and time value ."""
|
timestamp = row . get ( 'timestamp' , None )
if timestamp is not None :
try :
timestamp = int ( timestamp , 10 )
except ( ValueError , TypeError ) :
parser_mediator . ProduceExtractionWarning ( 'Unable to parse timestamp value: {0!s}' . format ( timestamp ) )
return dfdatetime_posix_time . PosixTime ( timestamp = timestamp )
# The timestamp is not available ; parse the local date and time instead .
try :
return self . _ConvertToTimestamp ( row [ 'date' ] , row [ 'time' ] )
except ValueError as exception :
parser_mediator . ProduceExtractionWarning ( ( 'Unable to parse time string: "{0:s} {1:s}" with error: ' '{2!s}' ) . format ( repr ( row [ 'date' ] ) , repr ( row [ 'time' ] ) , exception ) )
|
def is_presence_handler(type_, from_, cb):
    """Deprecated alias of :func:`.dispatcher.is_presence_handler`.

    .. deprecated:: 0.9
    """
    import aioxmpp.dispatcher as dispatcher
    return dispatcher.is_presence_handler(type_, from_, cb)
|
def write_bits(self, *args):
    '''Pack multiple bits into a single byte field.

    The bits are written in little-endian order: the first argument ends
    up in the least-significant bit position.  Raises ValueError when
    more than 8 arguments are supplied.
    '''
    if len(args) > 8:
        raise ValueError("Can only write 8 bits at a time")
    # Fold each flag into its bit position via XOR.
    packed = 0
    for position, flag in enumerate(args):
        packed ^= flag << position
    self._output_buffer.append(chr(packed))
    return self
|
def RemoveScanNode(self, path_spec):
    """Remove the scan node of a certain path specification.

    Args:
      path_spec (PathSpec): path specification.
    Returns:
      SourceScanNode: parent scan node or None if not available.
    Raises:
      RuntimeError: if the scan node has sub nodes.
    """
    node = self._scan_nodes.get(path_spec, None)
    if not node:
        return None
    if node.sub_nodes:
        raise RuntimeError('Scan node has sub nodes.')
    # Detach from the parent, then drop all bookkeeping entries.
    parent = node.parent_node
    if parent:
        parent.sub_nodes.remove(node)
    if path_spec == self._root_path_spec:
        self._root_path_spec = None
    del self._scan_nodes[path_spec]
    if path_spec.IsFileSystem():
        del self._file_system_scan_nodes[path_spec]
    return parent
|
def can_query_state_for_block(self, block_identifier: BlockSpecification) -> bool:
    """Return True if chain state may safely be queried at the block.

    Blocks close to the client's state-pruning horizon should not be
    queried.  More info:
    https://github.com/raiden-network/raiden/issues/3566.
    """
    latest = self.block_number()
    block = self.web3.eth.getBlock(block_identifier)
    age = latest - int(block['number'])
    return age < constants.NO_STATE_QUERY_AFTER_BLOCKS
|
def setup_parser(sub_parsers):
    """Set up the command line parser for the *run* subprogram and add it to *sub_parsers*."""
    run_parser = sub_parsers.add_parser(
        "run",
        prog="law run",
        description="Run a task with"
        " configurable parameters. See http://luigi.rtfd.io/en/stable/running_luigi.html for more"
        " info.",
    )
    run_parser.add_argument(
        "task_family",
        help="a task family registered in the task database file or"
        " a module and task class in the format <module>.<class>",
    )
    run_parser.add_argument("parameter", nargs="*", help="task parameters")
|
def collect_completions(self, active_parsers, parsed_args, cword_prefix, debug):
    """Visit the active parsers and their actions and collect completions.

    Executes their completers or introspects them for their option
    strings, returning the resulting completions as a list of strings.
    This method is exposed for overriding in subclasses; there is no
    need to use it directly.
    """
    completions = []
    debug("all active parsers:", active_parsers)
    active_parser = active_parsers[-1]
    debug("active_parser:", active_parser)
    wants_options = self.always_complete_options or (
        len(cword_prefix) > 0 and cword_prefix[0] in active_parser.prefix_chars)
    if wants_options:
        completions += self._get_option_completions(active_parser, cword_prefix)
    debug("optional options:", completions)
    next_positional = self._get_next_positional()
    debug("next_positional:", next_positional)
    if isinstance(next_positional, argparse._SubParsersAction):
        completions += self._get_subparser_completions(next_positional, cword_prefix)
    completions = self._complete_active_option(
        active_parser, next_positional, cword_prefix, parsed_args, completions)
    debug("active options:", completions)
    debug("display completions:", self._display_completions)
    return completions
|
def rollback_savepoint(self, savepoint):
    """Roll back to the given savepoint.

    :param savepoint: the name of the savepoint to roll back to
    :raise: pydbal.exception.DBALConnectionError
    """
    supported = self._platform.is_savepoints_supported()
    if not supported:
        raise DBALConnectionError.savepoints_not_supported()
    self.ensure_connected()
    self._platform.rollback_savepoint(savepoint)
|
def get_pr_checks(pr: PullRequestDetails) -> Dict[str, Any]:
    """List the check runs for the PR's branch commit.

    References:
      https://developer.github.com/v3/checks/runs/#list-check-runs-for-a-specific-ref

    Raises:
      RuntimeError: if the request does not return HTTP 200.
    """
    url = "https://api.github.com/repos/{}/{}/commits/{}/check-runs".format(
        pr.repo.organization, pr.repo.name, pr.branch_sha)
    # GitHub removed authentication via the ?access_token= query parameter;
    # send the token in the Authorization header instead, which also keeps
    # it out of request logs.
    response = requests.get(
        url,
        headers={
            'Accept': 'application/vnd.github.antiope-preview+json',
            'Authorization': 'token {}'.format(pr.repo.access_token),
        })
    if response.status_code != 200:
        raise RuntimeError('Get check-runs failed. Code: {}. Content: {}.'.format(
            response.status_code, response.content))
    return json.JSONDecoder().decode(response.content.decode())
|
def flush(self, *args, **kwgs):
    """Log the response just before ``RequestHandler.flush`` runs.

    This method will not be called in wsgi mode.
    """
    should_log = settings['LOG_RESPONSE'] and self._status_code != 500
    if should_log:
        log_response(self)
    super(BaseHandler, self).flush(*args, **kwgs)
|
def phone_text_subs():
    """Get dictionaries mapping spelled-out number words (including common
    typo/leet variants such as 'zer0' and 'sven') to their numeric values.

    Returns:
        dict of dicts: 'Small' (zero through ninety), 'Magnitude'
        (thousand, million) and 'Others' (single characters that look
        like digits).
    """
    small = {'zero': 0, 'zer0': 0, 'one': 1, 'two': 2, 'three': 3, 'four': 4, 'fuor': 4, 'five': 5, 'fith': 5, 'six': 6, 'seven': 7, 'sven': 7, 'eight': 8, 'nine': 9, 'ten': 10, 'eleven': 11, 'twelve': 12, 'thirteen': 13, 'fourteen': 14, 'fifteen': 15, 'sixteen': 16, 'seventeen': 17, 'eighteen': 18, 'nineteen': 19, 'twenty': 20, 'thirty': 30, 'forty': 40, 'fifty': 50, 'sixty': 60, 'seventy': 70, 'eighty': 80, 'ninety': 90, 'oh': 0}
    # Bug fix: these were written as 000 and 000000 (i.e. zero), so any
    # magnitude word mapped to 0 instead of its multiplier.
    magnitude = {'thousand': 1000, 'million': 1000000}
    others = {'!': 1, 'o': 0, 'l': 1, 'i': 1}
    return {'Small': small, 'Magnitude': magnitude, 'Others': others}
|
def lru_cache(maxsize=128, key_fn=None):
    """Decorator that adds an LRU cache of size ``maxsize`` to a function.

    ``maxsize`` is the number of different keys the cache can accommodate.
    ``key_fn`` builds the cache key from the call arguments; the default
    creates a tuple out of args and kwargs (if you use the default, there
    is no reason not to use functools.lru_cache directly).

    Possible use cases:
    - Your cache key is very large, so you don't want to keep the whole
      key in memory.
    - The function takes some arguments that don't affect the result.
    """
    def decorator(fn):
        cache = LRUCache(maxsize)
        argspec = inspect2.getfullargspec(fn)
        # NOTE(review): args[1:] drops the first positional argument —
        # this assumes `fn` is a method (skipping `self`); confirm the
        # behavior for plain functions.
        arg_names = argspec.args[1:] + argspec.kwonlyargs
        kwargs_defaults = get_kwargs_defaults(argspec)
        if key_fn is None:
            def make_key(args, kwargs):
                return get_args_tuple(args, kwargs, arg_names, kwargs_defaults)
        else:
            make_key = key_fn

        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            key = make_key(args, kwargs)
            try:
                return cache[key]
            except KeyError:
                result = fn(*args, **kwargs)
                cache[key] = result
                return result

        wrapper.clear = cache.clear
        return wrapper
    return decorator
|
def mfpt(totflux, pi, qminus):
    r"""Mean first passage time for the reaction A to B.

    Parameters
    ----------
    totflux : float
        The total flux between reactant and product.
    pi : (M,) ndarray
        Stationary distribution.
    qminus : (M,) ndarray
        Backward committor.

    Returns
    -------
    tAB : float
        The mean first-passage time for the A to B reaction.  Equal to
        the inverse rate; see [1].

    See also
    --------
    rate

    References
    ----------
    .. [1] F. Noe, Ch. Schuette, E. Vanden-Eijnden, L. Reich and
       T. Weikl: Constructing the Full Ensemble of Folding Pathways
       from Short Off-Equilibrium Simulations.
       Proc. Natl. Acad. Sci. USA, 106, 19011-19016 (2009)
    """
    # Thin wrapper around the dense TPT implementation.
    return dense.tpt.mfpt(totflux, pi, qminus)
|
def nanvar(values, axis=None, skipna=True, ddof=1, mask=None):
    """Compute the variance along the given axis while ignoring NaNs.

    Parameters
    ----------
    values : ndarray
    axis : int, optional
    skipna : bool, default True
    ddof : int, default 1
        Delta Degrees of Freedom. The divisor used in calculations is
        N - ddof, where N represents the number of elements.
    mask : ndarray[bool], optional
        nan-mask if known

    Returns
    -------
    result : float
        Unless input is a float array, in which case use the same
        precision as the input array.

    Examples
    --------
    >>> import pandas.core.nanops as nanops
    >>> s = pd.Series([1, np.nan, 2, 3])
    >>> nanops.nanvar(s)
    1.0
    """
    values = com.values_from_object(values)
    # Remember the original dtype so the result precision can match it.
    dtype = values.dtype
    if mask is None:
        mask = isna(values)
    # Integer arrays cannot hold NaN; promote to float64 first.
    if is_any_int_dtype(values):
        values = values.astype('f8')
        values[mask] = np.nan
    if is_float_dtype(values):
        count, d = _get_counts_nanvar(mask, axis, ddof, values.dtype)
    else:
        count, d = _get_counts_nanvar(mask, axis, ddof)
    if skipna:
        # Zero out the NaN positions so they do not contribute to the sums.
        values = values.copy()
        np.putmask(values, mask, 0)
    # xref GH10242
    # Compute variance via two-pass algorithm, which is stable against
    # cancellation errors and relatively accurate for small numbers of
    # observations.
    # See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
    avg = _ensure_numeric(values.sum(axis=axis, dtype=np.float64)) / count
    if axis is not None:
        avg = np.expand_dims(avg, axis)
    sqr = _ensure_numeric((avg - values) ** 2)
    np.putmask(sqr, mask, 0)
    result = sqr.sum(axis=axis, dtype=np.float64) / d
    # Return variance as np.float64 (the datatype used in the accumulator),
    # unless we were dealing with a float array, in which case use the same
    # precision as the original values array.
    if is_float_dtype(dtype):
        result = result.astype(dtype)
    return _wrap_results(result, values.dtype)
|
def download_file(self, file_path, range=None):
    """Download a file from Telegram servers.

    :param file_path: server-side path of the file to fetch.
    :param range: optional HTTP Range header value for a partial download.
    :returns: the result of ``session.get`` on the file URL.
    """
    url = "{0}/file/bot{1}/{2}".format(API_URL, self.api_token, file_path)
    headers = None
    if range:
        headers = {"range": range}
    return self.session.get(url, headers=headers, proxy=self.proxy, proxy_auth=self.proxy_auth)
|
def text_to_edtf_date(text):
    """Return the EDTF string equivalent of a given natural language date string.

    The approach here is to parse the text twice, with different default
    dates.  Then compare the results to see what differs — the positions
    that differ were filled by the default rather than parsed from the
    text, so they are marked as unspecified.
    """
    if not text:
        return
    t = text.lower()
    result = ''
    # Bail out early on strings the rules explicitly reject.
    for reject_re in REJECT_RULES:
        if re.match(reject_re, t):
            return
    # matches on '1800s'. Needs to happen before is_decade.
    could_be_century = re.findall(r'(\d{2}00)s', t)
    # matches on '1800s' and '1910s'. Removes the 's'.
    # Needs to happen before is_uncertain because e.g. "1860s?"
    t, is_decade = re.subn(r'(\d{3}0)s', r'\1', t)
    # detect approximation signifiers
    # a few 'circa' abbreviations just before the year
    is_approximate = re.findall(r'\b(ca?\.?) ?\d{4}', t)
    # the word 'circa' anywhere
    is_approximate = is_approximate or re.findall(r'\bcirca\b', t)
    # the word 'approx'/'around'/'about' anywhere
    is_approximate = is_approximate or re.findall(r'\b(approx|around|about)', t)
    # a ~ before a year-ish number
    is_approximate = is_approximate or re.findall(r'\b~\d{4}', t)
    # a ~ at the beginning
    is_approximate = is_approximate or re.findall(r'^~', t)
    # detect uncertainty signifiers
    t, is_uncertain = re.subn(r'(\d{4})\?', r'\1', t)
    # the words uncertain/maybe/guess anywhere
    is_uncertain = is_uncertain or re.findall(r'\b(uncertain|possibly|maybe|guess)', t)
    # detect century forms
    is_century = re.findall(CENTURY_RE, t)
    # detect CE/BCE year form
    is_ce = re.findall(CE_RE, t)
    if is_century:
        # e.g. "19th century" -> "18xx"; century N covers years (N-1)00..(N-1)99.
        result = "%02dxx" % (int(is_century[0][0]) - 1,)
        is_approximate = is_approximate or re.findall(r'\b(ca?\.?) ?' + CENTURY_RE, t)
        is_uncertain = is_uncertain or re.findall(CENTURY_RE + r'\?', t)
        try:
            is_bc = is_century[0][-1] in ("bc", "bce")
            if is_bc:
                result = "-%s" % result
        except IndexError:
            pass
    elif is_ce:
        result = "%04d" % (int(is_ce[0][0]))
        is_approximate = is_approximate or re.findall(r'\b(ca?\.?) ?' + CE_RE, t)
        is_uncertain = is_uncertain or re.findall(CE_RE + r'\?', t)
        try:
            is_bc = is_ce[0][-1] in ("bc", "bce")
            if is_bc:
                result = "-%s" % result
        except IndexError:
            pass
    else:
        # try dateutil.parse
        try:
            # parse twice, using different defaults to see what was
            # parsed and what was guessed.
            dt1 = parse(t, dayfirst=appsettings.DAY_FIRST, yearfirst=False, fuzzy=True,  # force a match, even if it's default date
                        default=DEFAULT_DATE_1)
            dt2 = parse(t, dayfirst=appsettings.DAY_FIRST, yearfirst=False, fuzzy=True,  # force a match, even if it's default date
                        default=DEFAULT_DATE_2)
        except ValueError:
            return
        if dt1.date() == DEFAULT_DATE_1.date() and dt2.date() == DEFAULT_DATE_2.date():
            # couldn't parse anything - defaults are untouched.
            return
        date1 = dt1.isoformat()[:10]
        date2 = dt2.isoformat()[:10]
        # guess precision of 'unspecified' characters to use
        mentions_year = re.findall(r'\byear\b.+(in|during)\b', t)
        mentions_month = re.findall(r'\bmonth\b.+(in|during)\b', t)
        mentions_day = re.findall(r'\bday\b.+(in|during)\b', t)
        # Walk the two ISO dates position by position (YYYY-MM-DD).
        for i in xrange(len(date1)):
            # if the given year could be a century (e.g. '1800s') then use
            # approximate/uncertain markers to decide whether we treat it as
            # a century or a decade.
            if i == 2 and could_be_century and not (is_approximate or is_uncertain):
                result += 'x'
            elif i == 3 and is_decade > 0:
                if mentions_year:
                    result += 'u'
                    # year precision
                else:
                    result += 'x'
                    # decade precision
            elif date1[i] == date2[i]:
                # since both attempts at parsing produced the same result
                # it must be parsed value, not a default
                result += date1[i]
            else:
                # different values were produced, meaning that it's likely
                # a default. Use 'unspecified'
                result += "u"
        # strip off unknown chars from end of string - except the first 4
        for i in reversed(xrange(len(result))):
            if result[i] not in ('u', 'x', '-'):
                smallest_length = 4
                if mentions_month:
                    smallest_length = 7
                if mentions_day:
                    smallest_length = 10
                limit = max(smallest_length, i + 1)
                result = result[:limit]
                break
        # check for seasons (EDTF encodes seasons as months 21-24)
        if "spring" in t:
            result = result[:4] + "-21" + result[7:]
        elif "summer" in t:
            result = result[:4] + "-22" + result[7:]
        elif "autumn" in t or "fall" in t:
            result = result[:4] + "-23" + result[7:]
        elif "winter" in t:
            result = result[:4] + "-24" + result[7:]
        # end dateutil post-parsing
    if is_uncertain:
        result += "?"
    if is_approximate:
        result += "~"
    # weed out bad parses
    if result.startswith("uu-uu"):
        return None
    return result
|
def save_data(self):
    """Prompt the user for a destination file and save the profiler result.

    Opens a native save-file dialog (filtered to "*.Result" files) rooted at
    the current working directory (or home), and, if the user confirms a
    filename, delegates the actual write to the data tree widget.
    """
    title = _("Save profiler result")
    # getsavefilename returns (filename, selected_filter); the filter is unused.
    filename, _selfilter = getsavefilename(self, title, getcwd_or_home(), _("Profiler result") + " (*.Result)")
    # An empty filename means the dialog was cancelled.
    if filename:
        self.datatree.save_data(filename)
|
def to_capabilities(self):
    """Marshals the IE options to the correct object."""
    capabilities = self._caps
    # Start from a copy of the base options so self._options is untouched.
    merged = dict(self._options)
    if self._arguments:
        merged[self.SWITCHES] = ' '.join(self._arguments)
    if self._additional:
        merged.update(self._additional)
    # Only attach the options payload when there is something to report.
    if merged:
        capabilities[Options.KEY] = merged
    return capabilities
|
def getRawReportDescriptor(self):
    """Return a binary string containing the raw HID report descriptor."""
    descriptor = _hidraw_report_descriptor()
    size = ctypes.c_uint()
    # First ioctl fills in the descriptor size; the second fetches the bytes.
    self._ioctl(_HIDIOCGRDESCSIZE, size, True)
    descriptor.size = size
    self._ioctl(_HIDIOCGRDESC, descriptor, True)
    # NOTE(review): chr()-joining looks Python-2 oriented; on Python 3 this
    # returns str rather than bytes -- confirm the intended return type.
    return ''.join(chr(x) for x in descriptor.value[:size.value])
|
def format_errors(self, errors, many):
    """Format validation errors as JSON Error objects."""
    if not errors:
        return {}
    # Pre-formatted error lists pass straight through unchanged.
    if isinstance(errors, (list, tuple)):
        return {'errors': errors}
    formatted = []
    if many:
        # For collections, errors is shaped {index: {field: [messages]}}.
        for idx, item_errors in iteritems(errors):
            for field_name, messages in iteritems(item_errors):
                for message in messages:
                    formatted.append(self.format_error(field_name, message, index=idx))
    else:
        # Single object: errors is shaped {field: [messages]}.
        for field_name, messages in iteritems(errors):
            for message in messages:
                formatted.append(self.format_error(field_name, message))
    return {'errors': formatted}
|
def _delete_from_hdx ( self , object_type , id_field_name ) : # type : ( str , str ) - > None
"""Helper method to deletes a resource from HDX
Args :
object _ type ( str ) : Description of HDX object type ( for messages )
id _ field _ name ( str ) : Name of field containing HDX object identifier
Returns :
None"""
|
if id_field_name not in self . data :
raise HDXError ( 'No %s field (mandatory) in %s!' % ( id_field_name , object_type ) )
self . _save_to_hdx ( 'delete' , id_field_name )
|
def batch_taxids(list_of_names):
    """Opposite of batch_taxonomy():
    Convert list of Latin names to taxids.

    Yields one taxid string per input name, lazily, so callers can stream
    results without waiting for every Entrez round-trip.
    """
    for name in list_of_names:
        # One NCBI Entrez query per name (network I/O).
        handle = Entrez.esearch(db='Taxonomy', term=name, retmode="xml")
        records = Entrez.read(handle)
        # NOTE(review): assumes at least one match -- an unknown name would
        # raise IndexError here; confirm that is the desired behaviour.
        yield records["IdList"][0]
|
def get_template(self):
    """Read an Excel template sheet and build a nested template structure.

    Walks every row of ``self.sheet``.  A row whose first cell is
    ``{{for field}}`` opens a sub-template (closed by a ``{{end}}`` row);
    any other row contributes its processed cells.  The returned value is a
    list of row dicts shaped like::

        {'cols': [...],    # processed cells of the row (mutually exclusive
         'subs': [...],    #   with subs in practice)
         'field': ...}     # data field name (sub-template rows only)
    """
    rows = []
    stack = []
    stack.append(rows)
    # ``top`` tracks the list currently being appended to (top of ``stack``).
    top = rows
    for i in range(1, self.sheet.max_row + 1):
        cell = self.sheet.cell(row=i, column=1)
        # Sub-template start marker: {{for field}}
        if (isinstance(cell.value, (str, unicode)) and cell.value.startswith('{{for ') and cell.value.endswith('}}')):
            row = {'field': cell.value[6:-2].strip(), 'cols': [], 'subs': []}
            top.append(row)
            top = row['subs']
            stack.append(top)
            # Remember the first row at which a sub-template begins.
            if self.begin == 1:
                self.begin = i
        # Sub-template end marker: {{end}} -- pop back to the parent level.
        elif (isinstance(cell.value, (str, unicode)) and cell.value == '{{end}}'):
            stack.pop()
            top = stack[-1]
        else:
            row = {'cols': [], 'subs': []}
            cols = row['cols']
            for j in range(1, self.sheet.max_column + 1):
                cell = self.sheet.cell(row=i, column=j)
                v = self.process_cell(i, j, cell)
                if v:
                    cols.append(v)
            # Skip rows that produced no content at all.
            if row['cols'] or row['subs']:
                top.append(row)
    # pprint ( rows )
    return rows
|
def REV(self, params):
    """REV Ra, Rb

    Reverse the byte order in register Rb and store the result in Ra.
    """
    Ra, Rb = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
    self.check_arguments(low_registers=(Ra, Rb))

    def REV_func():
        # Split the source word into its four bytes, then reassemble
        # them in the opposite order.
        word = self.register[Rb]
        byte0 = (word >> 24) & 0xFF
        byte1 = (word >> 16) & 0xFF
        byte2 = (word >> 8) & 0xFF
        byte3 = word & 0xFF
        self.register[Ra] = (byte3 << 24) | (byte2 << 16) | (byte1 << 8) | byte0
    return REV_func
|
def set_alpha(self, alpha):
    """Set alpha/last value on all four lighting attributes."""
    self.transparency = alpha
    # Index 3 is the alpha channel of each RGBA lighting component.
    for component in (self.diffuse, self.ambient, self.specular, self.emissive):
        component[3] = alpha
|
def json_2_injector_gear(json_obj):
    """transform the JSON return by Ariane server to local object

    :param json_obj: the JSON returned by Ariane server
    :return: a new InjectorCachedGear
    """
    LOGGER.debug("InjectorCachedGear.json_2_injector_gear")
    # Straight field-by-field mapping; a missing key raises KeyError.
    return InjectorCachedGear(gear_id=json_obj['gearId'], gear_name=json_obj['gearName'], gear_description=json_obj['gearDescription'], gear_admin_queue=json_obj['gearAdminQueue'], running=json_obj['running'])
|
def innerHTML(self) -> str:
    """Get innerHTML of the inner node.

    Delegates to the wrapped inner element when one is set; otherwise falls
    back to this node's own innerHTML via the parent class.
    """
    if self._inner_element:
        return self._inner_element.innerHTML
    return super().innerHTML
|
def get_OS_UUID(cls, os):
    """Validate Storage OS and its UUID.

    If ``os`` names a known template, that template's UUID is returned.
    Otherwise ``os`` must itself be a custom OS UUID (lower-case,
    hexadecimal, 8-4-4-4-12), which is returned verbatim.

    :param os: OS template name or custom OS UUID string.
    :return: the UUID for the given OS.
    :raises Exception: if ``os`` is neither a known template nor a UUID.
    """
    if os in cls.templates:
        return cls.templates[os]
    # UUIDs are hexadecimal: restrict to 0-9a-f.  The previous pattern
    # ([0-9a-z]) accepted arbitrary lower-case letters, letting non-UUID
    # strings pass validation.
    uuid_regexp = '^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$'
    if re.search(uuid_regexp, os):
        return os
    raise Exception(("Invalid OS -- valid options are: 'CentOS 6.5', 'CentOS 7.0', " "'Debian 7.8', 'Debian 8.0' ,'Ubuntu 12.04', 'Ubuntu 14.04', 'Ubuntu 16.04', " "'Windows 2008', 'Windows 2012'"))
|
def template(self, key):
    """Returns the template associated with this scaffold.

    :param key: <str>
    :return: <projex.scaffold.Template> || None
    """
    # Prefer an instance-registered template; fall back to the global
    # plugin registry when the key is unknown locally.
    local_templates = self._templates
    if key in local_templates:
        return local_templates[key]
    return Template.Plugins[key]
|
def escape_header(val):
    """Escape a value so that it can be used in a MIME header."""
    if val is None:
        return None
    try:
        # Plain ASCII values percent-escape directly, keeping '/' and spaces.
        escaped = quote(val, encoding="ascii", safe="/ ")
    except ValueError:
        # Non-ASCII input: fall back to the RFC 2231 extended-value form.
        escaped = "utf-8''" + quote(val, encoding="utf-8", safe="/ ")
    return escaped
|
def _read_parquet_columns(path, columns, num_splits, kwargs):  # pragma: no cover
    """Use a Ray task to read columns from Parquet into a Pandas DataFrame.

    Note: Ray functions are not detected by codecov (thus pragma: no cover)

    Args:
        path: The path of the Parquet file.
        columns: The list of column names to read.
        num_splits: The number of partitions to split the column into.
        kwargs: Extra keyword arguments forwarded to ``pq.read_pandas``.

    Returns:
        A list containing the split Pandas DataFrames and the Index as the last
        element. If there is not `index_col` set, then we just return the length.
        This is used to determine the total length of the DataFrame to build a
        default Index.
    """
    # Imported lazily so the module can load without pyarrow installed.
    import pyarrow.parquet as pq
    df = pq.read_pandas(path, columns=columns, **kwargs).to_pandas()
    # Append the length of the index here to build it externally
    return _split_result_for_readers(0, num_splits, df) + [len(df.index)]
|
def _effective_view_filter(self):
    """Returns the mongodb relationship filter for effective views.

    When the effective view is active, restricts results to documents whose
    [startDate, endDate] window contains the current UTC time; otherwise an
    empty (match-everything) filter is returned.
    """
    if self._effective_view == EFFECTIVE:
        now = datetime.datetime.utcnow()
        # MongoDB comparison operators are spelled $lte / $gte.  The previous
        # '$$lte' / '$$gte' spelling is the aggregation *variable* prefix and
        # is not a valid query operator, so the filter could never match.
        return {'startDate': {'$lte': now}, 'endDate': {'$gte': now}}
    return {}
|
def alignment(job, ids, input_args, sample):
    """Runs BWA and then Bamsort on the supplied fastqs for this sample

    Input1: Toil Job instance
    Input2: jobstore id dictionary
    Input3: Input arguments dictionary
    Input4: Sample tuple -- contains uuid and urls for the sample
    """
    uuid, urls = sample
    # ids['bam'] = job.fileStore.getEmptyFileStoreID()
    work_dir = job.fileStore.getLocalTempDir()
    output_dir = input_args['output_dir']
    key_path = input_args['ssec']
    cores = multiprocessing.cpu_count()
    # I/O: stage the reference genome and all of its BWA index files locally.
    return_input_paths(job, work_dir, ids, 'ref.fa', 'ref.fa.amb', 'ref.fa.ann', 'ref.fa.bwt', 'ref.fa.pac', 'ref.fa.sa', 'ref.fa.fai')
    # Get fastqs associated with this sample (decrypted with the ssec key).
    for url in urls:
        download_encrypted_file(work_dir, url, key_path, os.path.basename(url))
    # Parameters for BWA and Bamsort; both run in containers with work_dir
    # bind-mounted at /data.
    docker_cmd = ['docker', 'run', '--rm', '-v', '{}:/data'.format(work_dir)]
    # The -R read-group string uses literal tab escapes required by BWA.
    bwa_command = ["jvivian/bwa", "mem", "-R", "@RG\tID:{0}\tPL:Illumina\tSM:{0}\tLB:KapaHyper".format(uuid), "-T", str(0), "-t", str(cores), "/data/ref.fa"] + [os.path.join('/data/', os.path.basename(x)) for x in urls]
    bamsort_command = ["jeltje/biobambam", "/usr/local/bin/bamsort", "inputformat=sam", "level=1", "inputthreads={}".format(cores), "outputthreads={}".format(cores), "calmdnm=1", "calmdnmrecompindetonly=1", "calmdnmreference=/data/ref.fa", "I=/data/{}".format(uuid + '.sam')]
    # Piping the output to a file handle: BWA writes SAM, bamsort writes BAM.
    with open(os.path.join(work_dir, uuid + '.sam'), 'w') as f_out:
        subprocess.check_call(docker_cmd + bwa_command, stdout=f_out)
    with open(os.path.join(work_dir, uuid + '.bam'), 'w') as f_out:
        subprocess.check_call(docker_cmd + bamsort_command, stdout=f_out)
    # Save in JobStore
    # job.fileStore.updateGlobalFile(ids['bam'], os.path.join(work_dir, uuid + '.bam'))
    ids['bam'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, uuid + '.bam'))
    # Copy file to S3 (as a child job so it runs after this one succeeds).
    if input_args['s3_dir']:
        job.addChildJobFn(upload_bam_to_s3, ids, input_args, sample, cores=32, memory='20 G', disk='30 G')
    # Move file in output_dir
    if input_args['output_dir']:
        move_to_output_dir(work_dir, output_dir, uuid=None, files=[uuid + '.bam'])
|
def get_default_for(prop, value):
    """Ensures complex property types have the correct default values."""
    # Handle alternate props (leading underscores)
    prop = prop.strip('_')
    # Filtering of value happens here
    val = reduce_value(value)
    if prop in _COMPLEX_LISTS:
        # List-valued props are always wrapped in a list.
        return wrap_value(val)
    elif prop in _COMPLEX_STRUCTS:
        # Struct-valued props default to an empty dict.
        return val or {}
    else:
        # Plain props default to an empty (unicode) string.
        return u'' if val is None else val
|
def get_project_root():
    """Get the project root folder as a string.

    Side effects: ensures the expected project directory layout exists under
    the configured root and seeds missing info.yml files from the packaged
    defaults.
    """
    cfg = get_project_configuration()
    # At this point it can be sure that the configuration file exists
    # Now make sure the project structure exists
    for dirname in ["raw-datasets", "preprocessed", "feature-files", "models", "reports"]:
        directory = os.path.join(cfg['root'], dirname)
        if not os.path.exists(directory):
            os.makedirs(directory)
    # Packaged default YAML files live in the hwrt 'misc' resource dir.
    raw_yml_path = pkg_resources.resource_filename('hwrt', 'misc/')
    # TODO: How to check for updates if it already exists?
    raw_data_dst = os.path.join(cfg['root'], "raw-datasets/info.yml")
    if not os.path.isfile(raw_data_dst):
        raw_yml_pkg_src = os.path.join(raw_yml_path, "info.yml")
        shutil.copy(raw_yml_pkg_src, raw_data_dst)
    # Make sure small-baseline folders exists
    for dirname in ["models/small-baseline", "feature-files/small-baseline", "preprocessed/small-baseline"]:
        directory = os.path.join(cfg['root'], dirname)
        if not os.path.exists(directory):
            os.makedirs(directory)
    # Make sure small-baseline yml files exist
    paths = [("preprocessed/small-baseline/", "preprocessing-small-info.yml"), ("feature-files/small-baseline/", "feature-small-info.yml"), ("models/small-baseline/", "model-small-info.yml")]
    for dest, src in paths:
        raw_data_dst = os.path.join(cfg['root'], "%s/info.yml" % dest)
        if not os.path.isfile(raw_data_dst):
            raw_yml_pkg_src = os.path.join(raw_yml_path, src)
            shutil.copy(raw_yml_pkg_src, raw_data_dst)
    return cfg['root']
|
def run(self):
    '''Runs the NIfTI conversion based on internal state.

    Selects the frame(s) and slice(s) to convert from the loaded 3D/4D
    volume, then saves slices via dim_save(), optionally resliced along
    all three anatomical dimensions.
    '''
    self._log('About to perform NifTI to %s conversion...\n' % self._str_outputFileType)
    frames = 1
    frameStart = 0
    frameEnd = 0
    sliceStart = 0
    sliceEnd = 0
    if self._b_4D:
        self._log('4D volume detected.\n')
        frames = self._Vnp_4DVol.shape[3]
    if self._b_3D:
        self._log('3D volume detected.\n')
    # Choose the frame window; a _frameToConvert of -1 means "all frames".
    if self._b_convertMiddleFrame:
        self._frameToConvert = int(frames / 2)
    if self._frameToConvert == -1:
        frameEnd = frames
    else:
        frameStart = self._frameToConvert
        frameEnd = self._frameToConvert + 1
    for f in range(frameStart, frameEnd):
        if self._b_4D:
            # Extract the current 3D frame from the 4D volume.
            self._Vnp_3DVol = self._Vnp_4DVol[:, :, :, f]
        slices = self._Vnp_3DVol.shape[2]
        # Choose the slice window; a _sliceToConvert of -1 means "all slices".
        if self._b_convertMiddleSlice:
            self._sliceToConvert = int(slices / 2)
        if self._sliceToConvert == -1:
            sliceEnd = -1
        else:
            sliceStart = self._sliceToConvert
            sliceEnd = self._sliceToConvert + 1
        misc.mkdir(self._str_outputDir)
        if self._b_reslice:
            # Save along all three anatomical dimensions, one subdir each.
            for dim in ['x', 'y', 'z']:
                self.dim_save(dimension=dim, makeSubDir=True, indexStart=sliceStart, indexStop=sliceEnd, rot90=True)
        else:
            self.dim_save(dimension='z', makeSubDir=False, indexStart=sliceStart, indexStop=sliceEnd, rot90=True)
|
def _parse_node(graph, text, condition_node_params, leaf_node_params):
    """Parse a dumped tree node line and add it to the graph.

    Tries the condition-node pattern first, then the leaf pattern; the
    matching node id is returned after registering the node.
    """
    for pattern, params in ((_NODEPAT, condition_node_params), (_LEAFPAT, leaf_node_params)):
        match = pattern.match(text)
        if match is not None:
            node = match.group(1)
            graph.node(node, label=match.group(2), **params)
            return node
    raise ValueError('Unable to parse node: {0}'.format(text))
|
def remove_agent(self, agent: GUIAgent):
    """Removes the given agent.

    :param agent: the agent to remove
    :return:
    """
    # Release the agent's index slot and drop it from the model + widgets.
    self.index_manager.free_index(agent.overall_index)
    self.agents.remove(agent)
    self.update_teams_listwidgets()
    # Keep the persisted match configuration in sync with the new agent count.
    self.overall_config.set_value(MATCH_CONFIGURATION_HEADER, PARTICIPANT_COUNT_KEY, len(self.agents))
    self.overall_config.set_value(PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_LOADOUT_CONFIG_KEY, "None", agent.overall_index)
    if len(self.agents) == 0:
        return
    # Re-select a sensible row, preferring the removed agent's own team list
    # and falling back to the other team when that list is empty.
    if agent.get_team() == 0:
        if self.blue_listwidget.count() != 0:
            self.blue_listwidget.setCurrentRow(self.blue_listwidget.count() - 1)
        else:
            self.orange_listwidget.setCurrentRow(self.orange_listwidget.count() - 1)
    else:
        if self.orange_listwidget.count() != 0:
            self.orange_listwidget.setCurrentRow(self.orange_listwidget.count() - 1)
        else:
            self.blue_listwidget.setCurrentRow(self.blue_listwidget.count() - 1)
|
def write_file(data, outfilename):
    """Write a single file to disk.

    Returns True on success, False for empty input or on an I/O error
    (which is reported to stderr).
    """
    if not data:
        return False
    try:
        with open(outfilename, 'w') as outfile:
            # Skip falsy entries (None / empty strings).
            outfile.writelines(line for line in data if line)
        return True
    except (OSError, IOError) as err:
        sys.stderr.write('An error occurred while writing {0}:\n{1}'.format(outfilename, str(err)))
        return False
|
def to_int(x, index=False):
    """Formatting series or timeseries columns to int and checking validity.

    If `index=False`, the function works on the `pd.Series x`; else,
    the function casts the index of `x` to int and returns x with a new index.
    """
    # Work on the values or on the index, depending on `index`.
    _x = x.index if index else x
    cols = list(map(int, _x))
    # Elementwise comparison flags entries whose int cast changed the value.
    error = _x[cols != _x]
    if not error.empty:
        raise ValueError('invalid values `{}`'.format(list(error)))
    if index:
        x.index = cols
        return x
    else:
        # NOTE(review): this returns the original (unconverted) series, not
        # the int-cast values in `cols` -- confirm that is intentional.
        return _x
|
def range_depth(ranges, size, verbose=True):
    """Overlay ranges on [start, end], and summarize the ploidy of the intervals.

    Sweep-line over interval endpoints: depth rises at each LEFT endpoint and
    falls at each RIGHT endpoint.  Returns (depthstore, depthdetails) where
    depthstore maps depth -> total covered length and depthdetails lists
    (start, stop, depth) segments.
    """
    from jcvi.utils.iter import pairwise
    from jcvi.utils.cbook import percentage
    # Make endpoints
    endpoints = []
    for a, b in ranges:
        endpoints.append((a, LEFT))
        endpoints.append((b, RIGHT))
    endpoints.sort()
    vstart, vend = min(endpoints)[0], max(endpoints)[0]
    assert 0 <= vstart < size
    assert 0 <= vend < size
    depth = 0
    depthstore = defaultdict(int)
    # Everything before the first endpoint is at depth 0.
    depthstore[depth] += vstart
    depthdetails = [(0, vstart, depth)]
    # Process the leading tag of each consecutive endpoint pair; the very
    # last endpoint is handled after the loop.
    for (a, atag), (b, btag) in pairwise(endpoints):
        if atag == LEFT:
            depth += 1
        elif atag == RIGHT:
            depth -= 1
        depthstore[depth] += b - a
        depthdetails.append((a, b, depth))
    # The final (still unprocessed) endpoint must close an interval.
    assert btag == RIGHT
    depth -= 1
    assert depth == 0
    # Everything after the last endpoint is back at depth 0.
    depthstore[depth] += size - vend
    depthdetails.append((vend, size, depth))
    assert sum(depthstore.values()) == size
    if verbose:
        for depth, count in sorted(depthstore.items()):
            print("Depth {0}: {1}".format(depth, percentage(count, size)), file=sys.stderr)
    return depthstore, depthdetails
|
def recover(self, runAsync=False):
    """If the shared configuration store for a site is unavailable, a site
    in read-only mode will operate in a degraded capacity that allows
    access to the ArcGIS Server Administrator Directory. You can recover
    a site if the shared configuration store is permanently lost. The
    site must be in read-only mode, and the site configuration files
    must have been copied to the local repository when switching site
    modes. The recover operation will copy the configuration store from
    the local repository into the shared configuration store location.
    The copied local repository will be from the machine in the site
    where the recover operation is performed.

    Inputs:
        runAsync - default False - Decides if this operation must run
                   asynchronously.
    """
    url = self._url + "/recover"
    params = {"f": "json", "runAsync": runAsync}
    # POST through the shared handler so auth and proxy settings apply.
    return self._post(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
|
def range_yearly(start=None, stop=None, timezone='UTC', count=None):
    """This an alternative way to generating sets of Delorean objects with
    YEARLY stops"""
    # Thin convenience wrapper: fix the frequency and forward everything else.
    forwarded = dict(start=start, stop=stop, freq=YEARLY, timezone=timezone, count=count)
    return stops(**forwarded)
|
def toxml(self):
    """Exports this object into a LEMS XML object"""
    # Assemble the <With> element from its optional attributes; each
    # attribute is emitted only when it is truthy, matching the original
    # single-expression construction (including its double space).
    parts = ['<With ']
    if self.instance:
        parts.append(' instance="{0}"'.format(self.instance))
    if self.list:
        parts.append(' list="{0}"'.format(self.list))
    if self.index:
        parts.append(' index="{0}"'.format(self.index))
    parts.append(' as="{0}"/>'.format(self.as_))
    return ''.join(parts)
|
def users_getPresence(self, *, user: str, **kwargs) -> SlackResponse:
    """Gets user presence information.

    Args:
        user (str): User to get presence info on. Defaults to the authed user.
            e.g. 'W1234567890'
    """
    # Merge the mandatory user id into the extra query parameters.
    params = dict(kwargs, user=user)
    return self.api_call("users.getPresence", http_verb="GET", params=params)
|
def _next_unit_in_service ( service , placed_in_services ) :
"""Return the unit number where to place a unit placed on a service .
Receive the service name and a dict mapping service names to the current
number of placed units in that service ."""
|
current = placed_in_services . get ( service )
number = 0 if current is None else current + 1
placed_in_services [ service ] = number
return number
|
def _consent_registration(self, consent_args):
    """Register a request at the consent service

    :type consent_args: dict
    :rtype: str

    :param consent_args: All necessary parameters for the consent request
    :return: Ticket received from the consent service
    """
    # Sign the consent arguments as a compact JWS with our signing key.
    jws = JWS(json.dumps(consent_args), alg=self.signing_key.alg).sign_compact([self.signing_key])
    request = "{}/creq/{}".format(self.api_url, jws)
    res = requests.get(request)
    if res.status_code != 200:
        # Interpolate status/body into the message: the previous code passed
        # logging-style '%s' arguments to the exception constructor, so the
        # placeholders were never substituted.
        raise UnexpectedResponseError("Consent service error: {} {}".format(res.status_code, res.text))
    return res.text
|
def get_credentials():
    """Gets valid user credentials from storage.

    If nothing has been stored, or if the stored credentials are invalid,
    the OAuth2 flow is completed to obtain the new credentials.

    Returns:
        Credentials, the obtained credential.
    """
    # Credentials are cached under ~/.credentials/.
    home_dir = os.path.expanduser('~')
    credential_dir = os.path.join(home_dir, '.credentials')
    if not os.path.exists(credential_dir):
        os.makedirs(credential_dir)
    credential_path = os.path.join(credential_dir, 'calendar-python-quickstart.json')
    store = Storage(credential_path)
    credentials = store.get()
    if not credentials or credentials.invalid:
        # NOTE(review): `flags`, CLIENT_SECRET_FILE, SCOPES and
        # APPLICATION_NAME are module-level names -- presumably set by the
        # CLI entry point; verify before reuse elsewhere.
        flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
        flow.user_agent = APPLICATION_NAME
        if flags:
            credentials = tools.run_flow(flow, store, flags)
        else:
            # Needed only for compatibility with Python 2.6
            credentials = tools.run(flow, store)
        print('Storing credentials to ' + credential_path)
    return credentials
|
def url_api_version(self, api_version):
    """Return base API url string for the QualysGuard api_version and server."""
    # Resolve the path prefix for the requested API version, then build the
    # URL in a single place.
    if api_version == 1:
        # QualysGuard API v1 url.
        path = 'msp/'
    elif api_version == 2:
        # QualysGuard API v2 url.
        path = ''
    elif api_version == 'was':
        # QualysGuard REST v3 API url (Portal API).
        path = 'qps/rest/3.0/'
    elif api_version == 'am':
        # QualysGuard REST v1 API url (Portal API).
        path = 'qps/rest/1.0/'
    elif api_version == 'am2':
        # QualysGuard REST v2 API url (Portal API).
        path = 'qps/rest/2.0/'
    else:
        raise Exception("Unknown QualysGuard API Version Number (%s)" % (api_version,))
    url = "https://%s/%s" % (self.server, path)
    logger.debug("Base url =\n%s" % (url))
    return url
|
def get_added_obs_importance(self, obslist_dict=None, base_obslist=None, reset_zero_weight=False):
    """get a dataframe fo the posterior uncertainty
    as a results of added some observations

    Parameters
        obslist_dict : dict
            a nested dictionary-list of groups of observations
            that are to be treated as gained.  key values become
            row labels in returned dataframe.  If None, then every zero-weighted
            observation is tested sequentially.  Default is None
        base_obslist : list
            observation names to treat as the "existing" observations.
            The values of obslist_dict will be added to this list during
            each test.  If None, then the values in obslist_dict will
            be treated as the entire calibration dataset.  That is, there
            are no existing data.  Default is None.  Standard practice would
            be to pass this argument as Schur.pst.nnz_obs_names.
        reset_zero_weight : (boolean or float)
            a flag to reset observations with zero weight in either
            obslist_dict or base_obslist.  The value of reset_zero_weights
            can be cast to a float, then that value will be assigned to
            zero weight obs.  Otherwise, zero weight obs will be given a
            weight of 1.0.  Default is False.

    Returns
        pandas.DataFrame : pandas.DataFrame
            dataframe with row labels (index) of obslist_dict.keys() and
            columns of forecast_name.  The values in the dataframe are the
            posterior variance of the forecasts resulting from notional inversion
            using the observations in obslist_dict[key value] plus the observations
            in base_obslist (if any)

    Note
        all observations listed in obslist_dict and base_obslist with zero
        weights will be dropped unless reset_zero_weight is set

    Example
        ``>>>import pyemu``
        ``>>>sc = pyemu.Schur(jco="pest.jcb")``
        ``>>>df = sc.get_added_obs_importance(base_obslist=sc.pst.nnz_obs_names, reset_zero=True)``
    """
    # Normalize a plain list of cases to a {name: name} dict.
    if obslist_dict is not None:
        if type(obslist_dict) == list:
            obslist_dict = dict(zip(obslist_dict, obslist_dict))
    reset = False
    if reset_zero_weight is not False:
        if not self.obscov.isdiagonal:
            raise NotImplementedError("cannot reset weights for non-" + "diagonal obscov")
        reset = True
        # reset_zero_weight may carry the replacement weight; default to 1.0.
        try:
            weight = float(reset_zero_weight)
        except:
            weight = 1.0
        self.logger.statement("resetting zero weights to {0}".format(weight))
    # make copies of the original obscov and pst
    # org_obscov = self.obscov.get(self.obscov.row_names)
    org_obscov = self.obscov.copy()
    org_pst = self.pst.get()
    obs = self.pst.observation_data
    obs.index = obs.obsnme
    # if we don't care about grouping obs, then just reset all weights at once
    if base_obslist is None and obslist_dict is None and reset:
        onames = [name for name in self.pst.zero_weight_obs_names if name in self.jco.obs_names and name in self.obscov.row_names]
        obs.loc[onames, "weight"] = weight
    # if needed reset the zero-weight obs in base_obslist
    if base_obslist is not None and reset:
        # check for zero
        self.log("resetting zero weight obs in base_obslist")
        self.pst._adjust_weights_by_list(base_obslist, weight)
        self.log("resetting zero weight obs in base_obslist")
    if base_obslist is None:
        base_obslist = []
    else:
        if type(base_obslist) != list:
            self.logger.lraise("Schur.get_added_obs)_importance: base_obslist must be" + " type 'list', not {0}".format(str(type(base_obslist))))
    # if needed reset the zero-weight obs in obslist_dict
    if obslist_dict is not None and reset:
        z_obs = []
        for case, obslist in obslist_dict.items():
            # Single obs names are promoted to one-element lists.
            if not isinstance(obslist, list):
                obslist_dict[case] = [obslist]
                obslist = [obslist]
            # Guard: an obs may not appear in both the base set and a case.
            inboth = set(base_obslist).intersection(set(obslist))
            if len(inboth) > 0:
                raise Exception("observation(s) listed in both " + "base_obslist and obslist_dict: " + ','.join(inboth))
            z_obs.extend(obslist)
        self.log("resetting zero weight obs in obslist_dict")
        self.pst._adjust_weights_by_list(z_obs, weight)
        self.log("resetting zero weight obs in obslist_dict")
    # for a comprehensive obslist_dict
    if obslist_dict is None and reset:
        obs = self.pst.observation_data
        obs.index = obs.obsnme
        onames = [name for name in self.pst.zero_weight_obs_names if name in self.jco.obs_names and name in self.obscov.row_names]
        obs.loc[onames, "weight"] = weight
    # Default case set: every non-zero-weighted obs, tested one at a time.
    if obslist_dict is None:
        obslist_dict = {name: name for name in self.pst.nnz_obs_names if name in self.jco.obs_names and name in self.obscov.row_names}
    # reset the obs cov from the newly adjusted weights
    if reset:
        self.log("resetting self.obscov")
        self.reset_obscov(self.pst)
        self.log("resetting self.obscov")
    results = {}
    names = ["base"]
    # The "base" row: prior (no obs) or posterior with only base_obslist.
    if base_obslist is None or len(base_obslist) == 0:
        self.logger.statement("no base observation passed, 'base' case" + " is just the prior of the forecasts")
        for forecast, pr in self.prior_forecast.items():
            results[forecast] = [pr]
        # reset base obslist for use later
        base_obslist = []
    else:
        base_posterior = self.get(par_names=self.jco.par_names, obs_names=base_obslist).posterior_forecast
        for forecast, pt in base_posterior.items():
            results[forecast] = [pt]
    for case_name, obslist in obslist_dict.items():
        names.append(case_name)
        if not isinstance(obslist, list):
            obslist = [obslist]
        self.log("calculating importance of observations by adding: " + str(obslist) + '\n')
        # this case is the combination of the base obs plus whatever unique
        # obs names in obslist
        case_obslist = list(base_obslist)
        dedup_obslist = [oname for oname in obslist if oname not in case_obslist]
        case_obslist.extend(dedup_obslist)
        # print(self.pst.observation_data.loc[case_obslist, :])
        case_post = self.get(par_names=self.jco.par_names, obs_names=case_obslist).posterior_forecast
        for forecast, pt in case_post.items():
            results[forecast].append(pt)
        self.log("calculating importance of observations by adding: " + str(obslist) + '\n')
    df = pd.DataFrame(results, index=names)
    # Restore the original obscov/pst if the weights were modified above.
    if reset:
        self.reset_obscov(org_obscov)
        self.reset_pst(org_pst)
    return df
|
def main(cls, args, settings=None, userdata=None):  # pylint: disable=too-many-branches,too-many-statements
    """Main entry point of this module.

    Dispatches the parsed CLI `args` against a Compute Engine cluster:
    create / start / stop / reboot / terminate / list VMs.  Returns 0 on
    success and 1 on any handled error.
    """
    # The command-line client uses Service Account authentication.
    logger.info('Using Compute Engine credentials from %s', Focus.info(args.conf.name))
    try:
        key = json.loads(args.conf.read())
    except ValueError as msg:
        logger.error('Unable to parse %s: %s', args.conf.name, msg)
        return 1
    # Compute Engine Manager
    try:
        cluster = ComputeEngineManager(key['client_email'], args.conf.name, key['project_id'])
        cluster.connect()
        # Remove read |key| from memory which contains the private key.
        del key
    except ComputeEngineManagerException as msg:
        logger.error(msg)
        return 1
    # Create one or many compute VMs.
    if args.create:
        if args.tags:
            logger.info('Assigning the following tags to the instance: %r', args.tags)
        container_declaration = args.declaration.read()
        try:
            disk = cluster.build_bootdisk(args.image, args.disk_size, args.disk_auto_delete)
            conf = cluster.build_container_vm(container_declaration, disk, zone=args.zone, tags=args.tags, preemptible=args.preemptible)
        except ComputeEngineManagerException as msg:
            logging.error(msg)
            return 1
        logging.debug('VM Configuration: %r', conf)
        try:
            logging.info('Creating %d VM%s of type "%s" ...', args.count, Common.pluralize(args.count), args.size)
            nodes = cluster.create(args.size, args.count, conf, image=args.image)
            for node in nodes:
                logging.info('Node %s created and %s.', node.name, node.state)
        except ComputeEngineManagerException as msg:
            logging.error(msg)
            return 1
    # Run filters before dealing with any state routine.
    nodes = []
    if any([args.stop, args.start, args.reboot, args.terminate]):
        nodes = cls.list(cls, cluster, args.zone, args.states, args.names, args.tags)
    # Routines for other VM states.
    # NOTE(review): this file mixes the module `logger` and the root
    # `logging` module for output -- presumably unintentional; confirm.
    if args.start:
        try:
            logger.info("Starting %d node%s ...", len(nodes), Common.pluralize(nodes))
            cluster.start(nodes)
        except ComputeEngineManagerException as msg:
            logger.error(msg)
            return 1
    if args.stop:
        try:
            logger.info("Stopping %d node%s ...", len(nodes), Common.pluralize(nodes))
            cluster.stop(nodes)
        except ComputeEngineManagerException as msg:
            logger.error(msg)
            return 1
    if args.reboot:
        try:
            logger.info("Rebooting %d node%s ...", len(nodes), Common.pluralize(nodes))
            cluster.reboot(nodes)
        except ComputeEngineManagerException as msg:
            logger.error(msg)
            return 1
    if args.terminate:
        try:
            logger.info("Terminating %d node%s ...", len(nodes), Common.pluralize(nodes))
            cluster.terminate_ex(nodes, True)
        except ComputeEngineManagerException as msg:
            logger.error(msg)
            return 1
    if args.list:
        try:
            nodes = cls.list(cls, cluster, args.zone, args.states, args.names, args.tags)
            for node in nodes:
                logging.info('Node: %s is %s; IP: %s (%s); Preemtible: %s', node.name, node.state, Kurz.ips(node), Kurz.zone(node), Kurz.is_preemtible(node))
        except ComputeEngineManagerException as msg:
            logger.error(msg)
            return 1
    return 0
|
def generate_ctrlpts_weights(ctrlpts):
    """Generates unweighted control points from weighted ones in 1-D.

    This function

    #. Takes in a 1-D control points list whose coordinates are organized in (x*w, y*w, z*w, w) format
    #. Converts the input control points list into (x, y, z, w) format
    #. Returns the result

    The docstring labels previously had P and Pw reversed: the *input* is the
    weighted set (Pw) and the *output* is unweighted (P) with the weight kept
    as the last component.

    :param ctrlpts: 1-D weighted control points (Pw)
    :type ctrlpts: list
    :return: 1-D unweighted control points (P), weight retained as last element
    :rtype: list
    """
    new_ctrlpts = []
    for cpt in ctrlpts:
        weight = float(cpt[-1])
        # Divide every coordinate by the weight, then restore the weight
        # itself as the last component.
        unweighted = [float(coord) / weight for coord in cpt]
        unweighted[-1] = weight
        new_ctrlpts.append(unweighted)
    return new_ctrlpts
|
def registerFilters(self, **kwargs):
    """Register multiple filters at once.

    @param **kwargs: Multiple filters are registered using keyword
                     variables. Each keyword must correspond to a field
                     name with an optional suffix:
                     field:          Field equal to value or in list of values.
                     field_ic:       Same, using case insensitive comparison.
                     field_regex:    Field matches regex value or any regex
                                     in list of values.
                     field_ic_regex: Same, using case insensitive match.
    """
    for key, patterns in kwargs.items():
        # Peel off the optional '_regex' suffix first, then '_ic'.
        is_regex = key.endswith('_regex')
        col = key[:-len('_regex')] if is_regex else key
        ignore_case = col.endswith('_ic')
        if ignore_case:
            col = col[:-len('_ic')]
        self.registerFilter(col, patterns, is_regex, ignore_case)
|
def read_event(self, check_result):
    '''Convert the piped check result (json) into a global 'event' dict'''
    try:
        event = json.loads(check_result)
        # Guarantee the keys downstream handlers rely on, with defaults.
        for key, default in (('occurrences', 1), ('check', {}), ('client', {})):
            event[key] = event.get(key, default)
        return event
    except Exception:
        # Anything that is not a well-formed event dict is rejected.
        raise ValueError('error reading event: ' + check_result)
|
def htmlentityreplace_errors ( ex ) :
"""An encoding error handler .
This python ` codecs ` _ error handler replaces unencodable
characters with HTML entities , or , if no HTML entity exists for
the character , XML character references .
> > > u ' The cost was \u20ac 12 . ' . encode ( ' latin1 ' , ' htmlentityreplace ' )
' The cost was & euro ; 12 . '"""
|
if isinstance ( ex , UnicodeEncodeError ) : # Handle encoding errors
bad_text = ex . object [ ex . start : ex . end ]
text = _html_entities_escaper . escape ( bad_text )
return ( compat . text_type ( text ) , ex . end )
raise ex
|
def _runParasol(self, command, autoRetry=True):
    """Issue a parasol command with popen, capturing its output.

    If the command fails and ``autoRetry`` is set, the command is retried
    every 10 seconds until it succeeds; otherwise the failure is returned
    immediately.

    :param command: the parasol sub-command (iterable of args), appended to
        ``self.parasolCommand``.
    :param bool autoRetry: retry forever on failure when True.
    :return: ``(0, stdout_lines)`` on success, ``(status, None)`` on a
        non-retried failure.
    """
    command = list(concat(self.parasolCommand, command))
    while True:
        logger.debug('Running %r', command)
        process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=-1)
        stdout, stderr = process.communicate()
        status = process.wait()
        # Surface anything parasol printed on stderr as warnings.
        for line in stderr.decode('utf-8').split('\n'):
            if line:
                logger.warn(line)
        if status == 0:
            return 0, stdout.decode('utf-8').split('\n')
        message = 'Command %r failed with exit status %i' % (command, status)
        if not autoRetry:
            logger.error(message)
            return status, None
        logger.warn(message)
        logger.warn('Waiting for a 10s, before trying again')
        time.sleep(10)
|
def serialize_numeric(self, tag):
    """Return the literal representation of a numeric tag."""
    # Use the base-class __str__ so subclass formatting cannot interfere,
    # then append the tag's type suffix.
    if isinstance(tag, int):
        literal = int.__str__(tag)
    else:
        literal = float.__str__(tag)
    return literal + tag.suffix
|
def update_user_groups(self, user, claims):
    """Updates user group memberships based on the GROUPS_CLAIM setting.

    Args:
        user (django.contrib.auth.models.User): User model instance
        claims (dict): Claims from the access token
    """
    if settings.GROUPS_CLAIM is None:
        return
    # Update the user's group memberships
    django_groups = [group.name for group in user.groups.all()]
    if settings.GROUPS_CLAIM in claims:
        claim_groups = claims[settings.GROUPS_CLAIM]
        if not isinstance(claim_groups, list):
            claim_groups = [claim_groups, ]
    else:
        logger.debug("The configured groups claim '{}' was not found in the access token".format(settings.GROUPS_CLAIM))
        claim_groups = []
    # Diff the memberships instead of clearing and re-adding everything;
    # a full re-add would needlessly bump the autoincrement value of the
    # user-to-group mapping table.
    groups_to_remove = set(django_groups) - set(claim_groups)
    groups_to_add = set(claim_groups) - set(django_groups)
    for group_name in groups_to_remove:
        group = Group.objects.get(name=group_name)
        user.groups.remove(group)
        logger.debug("User removed from group '{}'".format(group_name))
    for group_name in groups_to_add:
        try:
            if settings.MIRROR_GROUPS:
                group, _ = Group.objects.get_or_create(name=group_name)
                logger.debug("Created group '{}'".format(group_name))
            else:
                group = Group.objects.get(name=group_name)
            user.groups.add(group)
            logger.debug("User added to group '{}'".format(group_name))
        except ObjectDoesNotExist:
            # Silently fail for non-existing groups.
            pass
|
def stop_capture_coordinates(self):
    """Exit the coordinate capture mode."""
    dialog = self.extent_dialog
    # Flush the captured coordinates into the dialog's fields, then restore
    # the map tool that was active before capturing started.
    dialog._populate_coordinates()
    dialog.canvas.setMapTool(dialog.previous_map_tool)
    self.parent.show()
|
def encrypt_to_file(contents, filename):
    """Encrypts ``contents`` and writes it to ``filename``.

    ``contents`` should be a bytes string. ``filename`` should end with
    ``.enc``.

    Returns the secret key used for the encryption.

    Decrypt the file with :func:`doctr.travis.decrypt_file`.
    """
    if not filename.endswith('.enc'):
        raise ValueError("%s does not end with .enc" % filename)
    # Fresh symmetric key per file; the caller is responsible for keeping it.
    key = Fernet.generate_key()
    ciphertext = Fernet(key).encrypt(contents)
    with open(filename, 'wb') as f:
        f.write(ciphertext)
    return key
|
def merge_rootnodes(self, other_docgraph):
    """Copy all the metadata from the root node of the other graph into this
    one. Then, move all edges belonging to the other root node to this
    one. Finally, remove the root node of the other graph from this one.
    """
    other_root = other_docgraph.root
    # copy metadata from other graph, cf. #136
    other_attrs = other_docgraph.node[other_root]
    if 'metadata' in other_attrs:
        self.node[self.root]['metadata'].update(other_attrs['metadata'])
    assert not other_docgraph.in_edges(other_root), "root node in graph '{}' must not have any ingoing edges".format(other_docgraph.name)
    # Re-attach the other root's outgoing edges to our own root.
    for _root, target, attrs in other_docgraph.out_edges(other_root, data=True):
        self.add_edge(self.root, target, attr_dict=attrs)
    self.remove_node(other_root)
|
def referenced(self):
    """For a cursor that is a reference, returns a cursor
    representing the entity that it references.
    """
    try:
        # Return the cached cursor when we already resolved it once.
        return self._referenced
    except AttributeError:
        self._referenced = conf.lib.clang_getCursorReferenced(self)
        return self._referenced
|
def update_slaves(self):
    """Update all `slave` |Substituter| objects.

    See method |Substituter.update_masters| for further information.
    """
    for subordinate in self.slaves:
        # Propagate this substituter's mapping down the whole slave chain.
        subordinate._medium2long.update(self._medium2long)
        subordinate.update_slaves()
|
def create_post(self, post_type, post_folders, post_subject, post_content, is_announcement=0, bypass_email=0, anonymous=False):
    """Create a post.

    It seems like if the post has `<p>` tags, then it's treated as HTML,
    but is treated as text otherwise. You'll want to provide `content`
    accordingly.

    :type post_type: str
    :param post_type: 'note', 'question'
    :type post_folders: str
    :param post_folders: Folder to put post into
    :type post_subject: str
    :param post_subject: Subject string
    :type post_content: str
    :param post_content: Content string
    :type is_announcement: bool
    :param is_announcement:
    :type bypass_email: bool
    :param bypass_email:
    :type anonymous: bool
    :param anonymous:
    :rtype: dict
    :returns: Dictionary with information about the created post.
    """
    config = {
        "bypass_email": bypass_email,
        "is_announcement": is_announcement,
    }
    params = {
        "anonymous": "yes" if anonymous else "no",
        "subject": post_subject,
        "content": post_content,
        "folders": post_folders,
        "type": post_type,
        "config": config,
    }
    return self._rpc.content_create(params)
|
def variantAnnotationsGenerator(self, request):
    """Returns a generator over the (variantAnnotations, nextPageToken) pairs
    defined by the specified request.
    """
    annotation_set_id = request.variant_annotation_set_id
    compound_id = datamodel.VariantAnnotationSetCompoundId.parse(annotation_set_id)
    # Resolve the chain: repository -> dataset -> variant set -> annotation set.
    dataset = self.getDataRepository().getDataset(compound_id.dataset_id)
    variant_set = dataset.getVariantSet(compound_id.variant_set_id)
    annotation_set = variant_set.getVariantAnnotationSet(annotation_set_id)
    # The interval iterator handles paging over the requested region.
    return paging.VariantAnnotationsIntervalIterator(request, annotation_set)
|
def sync_files(self, dataset_key):
    """Trigger synchronization process to update all dataset files linked to
    source URLs.

    :param dataset_key: Dataset identifier, in the form of owner/id
    :type dataset_key: str
    :raises RestApiException: If a server error occurs

    Examples
    --------
    >>> import datadotworld as dw
    >>> api_client = dw.api_client()
    >>> api_client.sync_files('username/test-dataset')  # doctest: +SKIP
    """
    try:
        owner_id, dataset_id = parse_dataset_key(dataset_key)
        self._datasets_api.sync(owner_id, dataset_id)
    except _swagger.rest.ApiException as e:
        # Wrap the transport-level error in the package's own exception type.
        raise RestApiError(cause=e)
|
def extract_all_snow_tweets_from_disk_generator(json_folder_path):
    """A generator that returns all SNOW tweets stored in disk.

    Input:  - json_folder_path: The path of the folder containing the raw data.

    Yields: - tweet: A tweet in python dictionary (json) format.
    """
    # os.path.join is portable across platforms, unlike manual "/" joining.
    for name in os.listdir(json_folder_path):
        json_file_path = os.path.join(json_folder_path, name)
        for tweet in extract_snow_tweets_from_file_generator(json_file_path):
            yield tweet
|
def index(self, item):
    """Finds the child index of a given item, searchs in added order."""
    # Children are scanned in insertion order; the first match wins.
    for position, child in enumerate(self._children):
        if child.item == item:
            return position
    raise ValueError("%s is not contained in any child" % (item))
|
def mute(self):
    """bool: The speaker's mute state.

    True if muted, False otherwise.
    """
    response = self.renderingControl.GetMute([('InstanceID', 0), ('Channel', 'Master')])
    # The UPnP response carries the mute flag as a numeric string ('0'/'1');
    # bool(int(...)) is the idiomatic form of `True if int(x) else False`.
    return bool(int(response['CurrentMute']))
|
def system(self):
    """Creates a reference to the System operations for Portal"""
    # Build the endpoint URL and hand over our security/proxy configuration.
    return _System(url=self._url + "/system",
                   securityHandler=self._securityHandler,
                   proxy_url=self._proxy_url,
                   proxy_port=self._proxy_port)
|
def extract_paths(self, paths, *args, **kwargs):
    """Thin method that just uses the provider"""
    # Pure delegation: the provider implements the actual extraction.
    backend = self.provider
    return backend.extract_paths(paths, *args, **kwargs)
|
def check_schema_transforms_match(schema, inverted_features):
    """Checks that the transform and schema do not conflict.

    Validates that every transform applied to a source column is legal for
    that column's schema type, that all transforms sharing a source column
    belong to the same transform family, and that exactly one target
    transform exists overall.

    Args:
        schema: schema list; each entry is a dict with 'name' and 'type' keys
        inverted_features: inverted_features dict mapping a source column
            name to the list of transform dicts that read from it

    Raises:
        ValueError if transform cannot be applied given schema type.
    """
    num_target_transforms = 0
    for col_schema in schema:
        col_name = col_schema['name']
        col_type = col_schema['type'].lower()
        # Check each transform and schema are compatible
        if col_name in inverted_features:
            for transform in inverted_features[col_name]:
                transform_name = transform['transform']
                if transform_name == constant.TARGET_TRANSFORM:
                    # Target transforms are counted here and validated after
                    # the loop (there must be exactly one).
                    num_target_transforms += 1
                    continue
                elif col_type in constant.NUMERIC_SCHEMA:
                    if transform_name not in constant.NUMERIC_TRANSFORMS:
                        raise ValueError('Transform %s not supported by schema %s' % (transform_name, col_type))
                elif col_type == constant.STRING_SCHEMA:
                    # String columns may take categorical, text, or image
                    # transforms.
                    if (transform_name not in constant.CATEGORICAL_TRANSFORMS + constant.TEXT_TRANSFORMS and transform_name != constant.IMAGE_TRANSFORM):
                        raise ValueError('Transform %s not supported by schema %s' % (transform_name, col_type))
                else:
                    raise ValueError('Unsupported schema type %s' % col_type)
        # Check each transform is compatible for the same source column.
        # inverted_features[col_name] should belong to exactly 1 of the 5 groups.
        if col_name in inverted_features:
            transform_set = {x['transform'] for x in inverted_features[col_name]}
            # Exactly one of the five subset checks must hold; a mix of
            # families on one source column is an error.
            if 1 != sum([transform_set.issubset(set(constant.NUMERIC_TRANSFORMS)), transform_set.issubset(set(constant.CATEGORICAL_TRANSFORMS)), transform_set.issubset(set(constant.TEXT_TRANSFORMS)), transform_set.issubset(set([constant.IMAGE_TRANSFORM])), transform_set.issubset(set([constant.TARGET_TRANSFORM]))]):
                message = """
            The source column of a feature can only be used in multiple
            features within the same family of transforms. The familes are
            1) text transformations: %s
            2) categorical transformations: %s
            3) numerical transformations: %s
            4) image transformations: %s
            5) target transform: %s
            Any column can also be a key column.
            But column %s is used by transforms %s.
            """ % (str(constant.TEXT_TRANSFORMS), str(constant.CATEGORICAL_TRANSFORMS), str(constant.NUMERIC_TRANSFORMS), constant.IMAGE_TRANSFORM, constant.TARGET_TRANSFORM, col_name, str(transform_set))
                raise ValueError(message)
    if num_target_transforms != 1:
        raise ValueError('Must have exactly one target transform')
|
def project_name_changed(self, widget, data=None):
    """Function controls whether run button is enabled"""
    # The run button is only usable once a non-empty project name is entered.
    self.run_btn.set_sensitive(widget.get_text() != "")
    self.update_full_label()
|
def create(self, req, parent, name, mode, fi):
    """Create and open a file.

    Not implemented here: always replies ENOSYS.

    Valid replies:
        reply_create
        reply_err
    """
    # Default stub: signal "operation not supported" to the kernel.
    self.reply_err(req, errno.ENOSYS)
|
def trace(line):
    """Usage: %trace (enable|disable) [file pattern] [output path]"""
    args = line.split()
    if args[0] not in {'enable', 'on'}:
        # Disable: detach the tracer and restore the original stdout.
        sys.settrace(None)
        sys.stdout = _ORIGINAL_STDOUT
        return
    pattern = args[1] if len(args) > 1 else None
    if len(args) > 2:
        # Redirect trace output by appending to the given file.
        sys.stdout = open(args[2], 'a')
    sys.settrace(partial(trace_line, pattern))
|
def get_string(self, recalculate_width=True):
    """Get the table as a String.

    Parameters
    ----------
    recalculate_width : bool, optional
        If width for each column should be recalculated (default True).
        Note that width is always calculated if it wasn't set
        explicitly when this method is called for the first time,
        regardless of the value of `recalculate_width`.

    Returns
    -------
    str:
        Table as a string.
    """
    # Empty table. returning empty string.
    if len(self._table) == 0:
        return ''
    # Temporarily prepend a serial-number column; it is popped again at the
    # end so the table itself is left unchanged.
    if self.serialno and self.column_count > 0:
        self.insert_column(0, self.serialno_header, range(1, len(self) + 1))
    # Should widths of column be recalculated
    if recalculate_width or sum(self._column_widths) == 0:
        self._calculate_column_widths()
    string_ = []
    # Drawing the top border
    if self.top_border_char:
        string_.append(self._get_top_border())
    # Print headers if not empty or only spaces
    if ''.join(self._column_headers).strip():
        headers = to_unicode(self._column_headers)
        string_.append(headers)
        # The header separator is only drawn when headers were printed.
        if self.header_separator_char:
            string_.append(self._get_header_separator())
    # Printing rows
    first_row_encountered = False
    for row in self._table:
        # A row separator goes between rows, never before the first one.
        if first_row_encountered and self.row_separator_char:
            string_.append(self._get_row_separator())
        first_row_encountered = True
        content = to_unicode(row)
        string_.append(content)
    # Drawing the bottom border
    if self.bottom_border_char:
        string_.append(self._get_bottom_border())
    # Undo the temporary serial-number column inserted above.
    if self.serialno and self.column_count > 0:
        self.pop_column(0)
    return '\n'.join(string_)
|
def is_ccw(points):
    """Check if connected planar points are counterclockwise.

    Parameters
    ----------
    points : (n, 2) float, connected points on a plane

    Returns
    -------
    ccw : bool, True if points are counterclockwise
    """
    points = np.asanyarray(points, dtype=np.float64)
    if points.ndim != 2 or points.shape[1] != 2:
        raise ValueError('CCW is only defined for 2D')
    # Trapezoid form of the shoelace formula:
    #   2*A = sum((x[i+1] - x[i]) * (y[i] + y[i+1]))
    dx = np.diff(points[:, 0])
    y_pair_sums = points[:-1, 1] + points[1:, 1]
    signed_area = np.sum(dx * y_pair_sums) * 0.5
    # With this sign convention a CCW loop yields a negative area.
    return signed_area < 0
|
def thumbnail(self, value):
    """gets/sets the thumbnail

    Enter the pathname to the thumbnail image to be used for the item.
    The recommended image size is 200 pixels wide by 133 pixels high.
    Acceptable image formats are PNG, GIF, and JPEG. The maximum file
    size for an image is 1 MB. This is not a reference to the file but
    the file itself, which will be stored on the sharing servers.
    """
    # Handle None before calling os.path.isfile: isfile(None) raises
    # TypeError (it only swallows OSError/ValueError), so the original
    # ordering made clearing the thumbnail impossible.
    if value is None:
        self._thumbnail = None
    elif os.path.isfile(value) and self._thumbnail != value:
        self._thumbnail = value
|
def _build_gene_disease_model(self, gene_id, relation_id, disease_id, variant_label, consequence_predicate=None, consequence_id=None, allelic_requirement=None, pmids=None):
    """Builds gene variant disease model

    Adds triples for a gene-(variant)-disease association to self.graph.
    When both a consequence predicate and id are given, the association is
    made through a blank-node variant locus; otherwise the gene itself is
    associated with the disease.

    :param gene_id: identifier of the gene
    :param relation_id: predicate relating the gene/variant to the disease
    :param disease_id: identifier of the disease
    :param variant_label: label used to mint the variant blank node
    :param consequence_predicate: optional predicate for the molecular
        consequence (only used together with consequence_id)
    :param consequence_id: optional molecular consequence term
    :param allelic_requirement: optional allelic requirement term (only
        attached for gene-level, i.e. non-variant, associations)
    :param pmids: optional list of publication ids used as sources
    :return: None
    """
    model = Model(self.graph)
    geno = Genotype(self.graph)
    # Normalize the optional publication list.
    pmids = [] if pmids is None else pmids
    is_variant = False
    variant_or_gene = gene_id
    variant_id_string = variant_label
    # Deterministic blank node for the variant, derived from its label.
    variant_bnode = self.make_id(variant_id_string, "_")
    if consequence_predicate is not None and consequence_id is not None:
        is_variant = True
        model.addTriple(variant_bnode, consequence_predicate, consequence_id)
        # Hack to add labels to terms that
        # don't exist in an ontology
        if consequence_id.startswith(':'):
            model.addLabel(consequence_id, consequence_id.strip(':').replace('_', ' '))
    if is_variant:
        variant_or_gene = variant_bnode
        # Typically we would type the variant using the
        # molecular consequence, but these are not specific
        # enough for us to make mappings (see translation table)
        model.addIndividualToGraph(variant_bnode, variant_label, self.globaltt['variant_locus'])
        geno.addAffectedLocus(variant_bnode, gene_id)
        model.addBlankNodeAnnotation(variant_bnode)
    # The association links either the gene or the variant blank node to
    # the disease, using the provided relation and publications as source.
    assoc = G2PAssoc(self.graph, self.name, variant_or_gene, disease_id, relation_id)
    assoc.source = pmids
    assoc.add_association_to_graph()
    if allelic_requirement is not None and is_variant is False:
        model.addTriple(assoc.assoc_id, self.globaltt['has_allelic_requirement'], allelic_requirement)
        # Same labeling hack as above for non-ontology terms.
        if allelic_requirement.startswith(':'):
            model.addLabel(allelic_requirement, allelic_requirement.strip(':').replace('_', ' '))
|
def getBigIndexFromIndices(self, indices):
    """Get the big index from a given set of indices

    @param indices
    @return big index
    @note no checks are performed to ensure that the returned
          indices are valid
    """
    # Dot product of the index tuple with the per-dimension products.
    return sum(self.dimProd[i] * indices[i] for i in range(self.ndims))
|
def mask_raster(in_raster, mask, out_raster):
    """Mask raster data.

    Applies the mask raster to each input raster and writes the result as a
    GeoTIFF. Inputs with the same grid as the mask are masked cell-by-cell;
    inputs on a different grid are resampled by looking up the value at each
    mask cell's center coordinate.

    Args:
        in_raster: list or one raster (path(s) readable by RasterUtilClass)
        mask: Mask raster data
        out_raster: list or one raster (output path(s), same length as input)

    Raises:
        RuntimeError: if the input and output lists differ in length.
    """
    # Allow a single path for both input and output by wrapping in lists.
    if is_string(in_raster) and is_string(out_raster):
        in_raster = [str(in_raster)]
        out_raster = [str(out_raster)]
    if len(in_raster) != len(out_raster):
        raise RuntimeError('input raster and output raster must have the same size.')
    maskr = RasterUtilClass.read_raster(mask)
    rows = maskr.nRows
    cols = maskr.nCols
    maskdata = maskr.data
    # Boolean array: True where the mask itself has no data.
    temp = maskdata == maskr.noDataValue
    for inr, outr in zip(in_raster, out_raster):
        origin = RasterUtilClass.read_raster(inr)
        if origin.nRows == rows and origin.nCols == cols:
            # Grids match: mask directly, keeping the input's nodata value.
            masked = numpy.where(temp, origin.noDataValue, origin.data)
        else:
            # Grids differ: start from all-nodata and sample the input at
            # the center coordinate of every valid mask cell.
            masked = numpy.ones((rows, cols)) * origin.noDataValue
            # TODO, the following loop should be optimized by numpy or numba
            for i in range(rows):
                for j in range(cols):
                    if maskdata[i][j] == maskr.noDataValue:
                        continue
                    # get the center point coordinate of current cell
                    tempx, tempy = maskr.get_central_coors(i, j)
                    tempv = origin.get_value_by_xy(tempx, tempy)
                    if tempv is None:
                        continue
                    masked[i][j] = tempv
        # Output uses the mask's grid/projection but the input's nodata/type.
        RasterUtilClass.write_gtiff_file(outr, maskr.nRows, maskr.nCols, masked, maskr.geotrans, maskr.srs, origin.noDataValue, origin.dataType)
|
def build_command(command, parameter_map):
    """Build command line(s) using the given parameter map.

    Even if the passed a single `command`, this function will return a list
    of shell commands. It is the caller's responsibility to concatenate them,
    likely using the semicolon or double ampersands.

    :param command: The command to interpolate params into.
    :type command: str|list[str]
    :param parameter_map: A ParameterMap object containing parameter knowledge.
    :type parameter_map: valohai_yaml.objs.parameter_map.ParameterMap
    :return: list of commands
    :rtype: list[str]
    """
    if isinstance(parameter_map, list):
        # Partially emulate old (pre-0.7) API for this function.
        parameter_map = LegacyParameterMap(parameter_map)
    results = []
    for cmd in listify(command):
        # Only attempt formatting if the string smells like it should be
        # formatted; this lets users embed shell syntax in commands. False
        # positives are possible, so interpolation failures merely warn.
        if interpolable_re.search(cmd):
            try:
                cmd = interpolable_re.sub(lambda match: _replace_interpolation(parameter_map, match), cmd)
            except ValueError as exc:  # pragma: no cover
                warnings.warn('failed to interpolate into %r: %s' % (cmd, exc), CommandInterpolationWarning)
        results.append(cmd.strip())
    return results
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.