signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def connection_exists(self, from_obj, to_obj):
    """Return ``True`` if a connection between the given objects exists,
    else ``False``."""
    # Validate the content types first; raises if the pair is not allowed.
    self._validate_ctypes(from_obj, to_obj)
    matching = self.connections.filter(from_pk=from_obj.pk, to_pk=to_obj.pk)
    return matching.exists()
|
def _check_data(self):
    """Check if the data given is valid.

    Validates ``axes``, ``points``, ``name`` and ``inverted``, normalising
    ``points`` to a numpy array in place.

    :raises PolygonFilterError: if ``axes``/``points`` are unset, the points
        are not (x, y) pairs, or ``inverted`` is not a bool.
    """
    # Both the plot axes and the polygon points are mandatory.
    if self.axes is None:
        raise PolygonFilterError("`axes` parm not set.")
    if self.points is None:
        raise PolygonFilterError("`points` parm not set.")
    # Normalise `points` to an ndarray; conversion happens before the shape
    # check, so self.points is already an array even if validation fails.
    self.points = np.array(self.points)
    if self.points.shape[1] != 2:
        raise PolygonFilterError("data points' shape[1] must be 2.")
    if self.name is None:
        # Fall back to a generated display name based on the unique id.
        self.name = "polygon filter {}".format(self.unique_id)
    if not isinstance(self.inverted, bool):
        raise PolygonFilterError("`inverted` must be boolean.")
|
def _ip_is_usable ( self , current_ip ) :
"""Check if the current Tor ' s IP is usable .
: argument current _ ip : current Tor IP
: type current _ ip : str
: returns bool"""
|
# Consider IP addresses only .
try :
ipaddress . ip_address ( current_ip )
except ValueError :
return False
# Never use real IP .
if current_ip == self . real_ip :
return False
# Do dot allow IP reuse .
if not self . _ip_is_safe ( current_ip ) :
return False
return True
|
def get_variables(self, *args, **kwargs):
    """Provide a warning that get_variables on Sequential always returns ()."""
    warning = (
        "Calling Sequential.get_variables, which will always return an empty "
        "tuple. get_variables() can only return variables created directly by "
        "a Module, or created by submodules directly created inside the "
        "Module. Sequential is constructed from already constructed submodules "
        "and so this will always be empty. See the documentation for more "
        "details, but tl;dr if you need to connect some modules sequentially "
        "and call get_variables on the result, writing a simple custom module "
        "is the simplest way. Another option is to call get_all_variables().")
    tf.logging.warning(warning)
    # Delegate to the base class anyway so the (empty) result is consistent.
    return super(Sequential, self).get_variables(*args, **kwargs)
|
def read(self, path, encoding=None):
    """Read the template at the given path, and return it as a unicode string."""
    raw = common.read(path)
    if encoding is None:
        # Fall back to the reader's configured default encoding.
        encoding = self.file_encoding
    return self.unicode(raw, encoding)
|
def msg(self, *args, **kwargs):
    """Only execute callback when interval is reached."""
    # The very first call (no timestamp recorded yet) always fires.
    should_fire = self.timestamp is None or self._interval_reached()
    if should_fire:
        self.callback(*args, **kwargs)
        self.reset()
|
def create_role(self, name):
    '''Create a new :class:`Role` owned by this :class:`Subject`'''
    # Look up the role model through the session's model router.
    return self.session.router.role.new(name=name, owner=self)
|
def authenticate(self, provider=None, identifier=None):
    """Fetch user for a given provider by id."""
    # Accept either a Provider instance or a provider name string.
    if isinstance(provider, Provider):
        provider_q = Q(provider=provider)
    else:
        provider_q = Q(provider__name=provider)
    matches = AccountAccess.objects.filter(
        provider_q, identifier=identifier).select_related('user')
    try:
        return matches[0].user
    except IndexError:
        # No access record for this provider/identifier pair.
        return None
|
def _get_address_override(endpoint_type=PUBLIC):
    """Returns any address overrides that the user has defined based on the
    endpoint type.

    Note: this function allows for the service name to be inserted into the
    address if the user specifies {service_name}.somehost.org.

    :param endpoint_type: the type of endpoint to retrieve the override
                          value for.
    :returns: any endpoint address or hostname that the user has overridden
              or None if an override is not present.
    """
    addr_override = config(ADDRESS_MAP[endpoint_type]['override'])
    if addr_override:
        # Substitute the deployed service's name into the template, if used.
        return addr_override.format(service_name=service_name())
    return None
|
def restart_kernel(self):
    """Restart kernel of current client."""
    client = self.get_current_client()
    if client is None:
        # Nothing to restart.
        return
    # Bring the plugin to the foreground before restarting.
    self.switch_to_plugin()
    client.restart_kernel()
|
def get_logging_file_handler(logger=None, file=None,
                             formatter=LOGGING_DEFAULT_FORMATTER):
    """Adds a logging file handler to given logger or default logger using given file.

    :param logger: Logger to add the handler to.
    :type logger: Logger
    :param file: File to verbose into.
    :type file: unicode
    :param formatter: Handler formatter.
    :type formatter: Formatter
    :return: Added handler.
    :rtype: Handler
    """
    if logger is None:
        logger = LOGGER
    if file is None:
        # NOTE(review): only the temp file's *name* is kept; the file object
        # is discarded and may be deleted on GC, after which FileHandler
        # recreates the path -- presumably intentional, confirm.
        file = tempfile.NamedTemporaryFile().name
    handler = logging.FileHandler(file)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    return handler
|
def window_open_config(self, temperature, duration):
    """Configures the window open behavior. The duration is specified in
    5 minute increments.

    :param temperature: target temperature while the window is open.
    :param duration: a timedelta whose ``seconds`` must be in [0, 3600].
    :raises ValueError: if the duration is out of range.
    """
    _LOGGER.debug("Window open config, temperature: %s duration: %s",
                  temperature, duration)
    self._verify_temperature(temperature)
    # BUG FIX: the original used `and` here, which can never be true (a
    # value cannot be both < 0 and > 3600), so the range check never fired.
    # Reject durations outside [0, 3600] seconds.
    if duration.seconds < 0 or duration.seconds > 3600:
        raise ValueError
    # Temperature is encoded in half-degrees, duration in 5 min (300 s) steps.
    value = struct.pack('BBB', PROP_WINDOW_OPEN_CONFIG,
                        int(temperature * 2), int(duration.seconds / 300))
    self._conn.make_request(PROP_WRITE_HANDLE, value)
|
def get_midpoint_outgroup(self):
    """Returns the node that divides the current tree into two
    distance-balanced partitions."""
    # Find the two most distant nodes: the farthest leaf from the root,
    # then the farthest node from that leaf (the tree "diameter").
    # (Removed unused locals r2A_dist, nB and outgroup from the original.)
    root = self.get_tree_root()
    nA, _ = root.get_farthest_leaf()
    _, A2B_dist = nA.get_farthest_node()
    middist = A2B_dist / 2.0
    # Walk up from nA accumulating branch lengths; stop climbing once the
    # accumulated distance passes the midpoint of the diameter path.
    cdist = 0
    current = nA
    while current is not None:
        cdist += current.dist
        if cdist > middist:
            break
        current = current.up
    return current
|
def get_option_columns(self, typ, element):
    """Return the column of the model to show for each level.

    Because each level might be displayed in a combobox, you might want to
    provide the column to show.

    :param typ: the typ of options. E.g. Asset, Alembic, Camera etc
    :type typ: str
    :param element: The element for which the options should be fetched.
    :type element: :class:`jukeboxcore.djadapter.models.Asset` | :class:`jukeboxcore.djadapter.models.Shot`
    :returns: a list of columns
    :rtype: list
    :raises: None
    """
    # Delegate to the type-specific interface.
    return self.get_typ_interface(typ).get_option_columns(element)
|
def _set_use_vrf(self, v, load=False):
    """Setter method for use_vrf, mapped from YANG variable
    /rbridge_id/ssh/server/ssh_vrf_cont/use_vrf (list)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_use_vrf is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_use_vrf() directly.
    """
    # Unwrap values that still carry their original user type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce the value into the generated YANG list type; an incompatible
        # value raises TypeError/ValueError from the pyangbind machinery.
        t = YANGDynClass(v, base=YANGListType("use_vrf_name", use_vrf.use_vrf, yang_name="use-vrf", rest_name="use-vrf", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='use-vrf-name', extensions={u'tailf-common': {u'info': u'Configure VRF Name **', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'ssh_server_vrf_cp'}}), is_container='list', yang_name="use-vrf", rest_name="use-vrf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure VRF Name **', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'ssh_server_vrf_cp'}}, namespace='urn:brocade.com:mgmt:brocade-sec-services', defining_module='brocade-sec-services', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the generated-type description for the caller.
        raise ValueError({'error-string': """use_vrf must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("use_vrf_name",use_vrf.use_vrf, yang_name="use-vrf", rest_name="use-vrf", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='use-vrf-name', extensions={u'tailf-common': {u'info': u'Configure VRF Name **', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'ssh_server_vrf_cp'}}), is_container='list', yang_name="use-vrf", rest_name="use-vrf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure VRF Name **', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'ssh_server_vrf_cp'}}, namespace='urn:brocade.com:mgmt:brocade-sec-services', defining_module='brocade-sec-services', yang_type='list', is_config=True)""", })
    self.__use_vrf = t
    # Notify any registered change hook.
    if hasattr(self, '_set'):
        self._set()
|
def add_pane(self, pane, vsplit=False):
    """Add another pane to this Window."""
    assert isinstance(pane, Pane)
    assert isinstance(vsplit, bool)
    split_cls = VSplit if vsplit else HSplit
    if self.active_pane is None:
        # First pane: attach it directly to the root container.
        self.root.append(pane)
    else:
        parent = self._get_parent(self.active_pane)
        index = parent.index(self.active_pane)
        if isinstance(parent, split_cls):
            # Parent already splits in the requested direction:
            # insert the new pane right after the active one.
            parent.insert(index + 1, pane)
        else:
            # Wrap the active pane plus the new pane in a fresh split of
            # the requested direction, replacing the active pane in place.
            new_split = split_cls([self.active_pane, pane])
            parent[index] = new_split
            # Give the newly created split the same weight as the original
            # pane that was at this position.
            parent.weights[new_split] = parent.weights[self.active_pane]
    self.active_pane = pane
    self.zoom = False
|
def bounding_box(self):
    """Generate a bounding box based on the full complexity part.

    :return: bounding box of part
    :rtype: cadquery.BoundBox
    """
    # Prefer the world-coordinate object when world coords are set.
    obj = self.world_obj if self.world_coords else self.local_obj
    return obj.findSolid().BoundingBox()
|
def make_fileitem_peinfo_detectedentrypointsignature_name(entrypoint_name, condition='is',
                                                          negate=False, preserve_case=False):
    """Create a node for FileItem/PEInfo/DetectedEntryPointSignature/Name

    :return: A IndicatorItem represented as an Element node
    """
    # Fixed document/search/content-type for this indicator term.
    return ioc_api.make_indicatoritem_node(
        condition,
        'FileItem',
        'FileItem/PEInfo/DetectedEntryPointSignature/Name',
        'string',
        entrypoint_name,
        negate=negate,
        preserve_case=preserve_case)
|
def to_file(self, filename, format='shtools', header=None, errors=False, **kwargs):
    """Save spherical harmonic coefficients to a file.

    Usage
    -----
    x.to_file(filename, [format='shtools', header, errors])
    x.to_file(filename, [format='npy', **kwargs])

    Parameters
    ----------
    filename : str
        Name of the output file.
    format : str, optional, default = 'shtools'
        'shtools' or 'npy'. See method from_file() for more information.
    header : str, optional, default = None
        A header string written to an 'shtools'-formatted file directly
        before the spherical harmonic coefficients.
    errors : bool, optional, default = False
        If True, save the errors in the file (for 'shtools' formatted
        files only).
    **kwargs : keyword argument list, optional for format='npy'
        Keyword arguments of numpy.save().

    Description
    -----------
    If format='shtools', an ascii file is written: an optional header line,
    a line with r0, gm, omega and lmax, then one line per (l, m) with
        l, m, coeffs[0, l, m], coeffs[1, l, m]
    (plus error[0, l, m], error[1, l, m] when errors is True).
    If format='npy', the coefficients (but not the meta-data nor errors)
    are saved with numpy.save().
    """
    # BUG FIX: the original compared strings with `is`, which relies on
    # CPython literal interning and is not a guaranteed equality test
    # (it raises SyntaxWarning on modern Pythons); use `==` instead.
    if format == 'shtools':
        if errors and self.errors is None:
            # BUG FIX: corrected "when then have" -> "when they have".
            raise ValueError('Can not save errors when they have not been '
                             'initialized.')
        # An unset rotation rate is written as zero.
        omega = 0. if self.omega is None else self.omega
        with open(filename, mode='w') as file:
            if header is not None:
                file.write(header + '\n')
            file.write('{:.16e}, {:.16e}, {:.16e}, {:d}\n'.format(
                self.r0, self.gm, omega, self.lmax))
            for l in range(self.lmax + 1):
                for m in range(l + 1):
                    if errors:
                        file.write('{:d}, {:d}, {:.16e}, {:.16e}, '
                                   '{:.16e}, {:.16e}\n'.format(
                                       l, m,
                                       self.coeffs[0, l, m],
                                       self.coeffs[1, l, m],
                                       self.errors[0, l, m],
                                       self.errors[1, l, m]))
                    else:
                        file.write('{:d}, {:d}, {:.16e}, {:.16e}\n'.format(
                            l, m, self.coeffs[0, l, m], self.coeffs[1, l, m]))
    elif format == 'npy':
        _np.save(filename, self.coeffs, **kwargs)
    else:
        raise NotImplementedError('format={:s} not implemented'.format(repr(format)))
|
def _greater_or_close ( a , value , ** kwargs ) :
r"""Compare values for greater or close to boolean masks .
Returns a boolean mask for values greater than or equal to a target within a specified
absolute or relative tolerance ( as in : func : ` numpy . isclose ` ) .
Parameters
a : array - like
Array of values to be compared
value : float
Comparison value
Returns
array - like
Boolean array where values are greater than or nearly equal to value ."""
|
return ( a > value ) | np . isclose ( a , value , ** kwargs )
|
def skip(self, regex):
    """Like :meth:`scan`, but return the number of characters matched.

    >>> s = Scanner("test string")
    >>> s.skip('test ')
    """
    # Advance past the match but report only its length.
    return self.scan_full(regex, advance_pointer=True, return_string=False)
|
def _imm_dir(self):
    '''An immutable object's dir function should list not only its attributes,
    but also its un-cached lazy values.'''
    # Union of class attributes, instance attributes and lazy value names.
    names = set(dir(self.__class__))
    names.update(self.__dict__.keys())
    names.update(six.iterkeys(_imm_value_data(self)))
    return sorted(names)
|
def format_stats(stats):
    """Convert flat ``'<id>:<field>'`` stat keys into nested per-id dicts.

    Given a dictionary following this layout:
        'encoded:label': 'Encoded',
        'encoded:value': 'Yes',
        'encoded:description': 'Indicates if the column is encoded',
        'encoded:include': True,
        ...
    format_stats will convert the dict into this structure:
        'encoded': {
            'id': 'encoded',
            'label': 'Encoded',
            'value': 'Yes',
            'description': 'Indicates if the column is encoded',
            'include': True
        },
        ...
    Entries whose 'include' field is falsy are dropped, and a synthetic
    'has_stats' entry (never included) is always added.
    """
    grouped = {}
    for key, value in stats.items():
        stat_id, field = key.split(":")
        entry = grouped.setdefault(stat_id, {"id": stat_id})
        entry[field] = value
    # strip out all the stats we don't want
    result = {sid: entry for sid, entry in grouped.items()
              if entry.get('include', False)}
    # we always have a 'has_stats' field, it's never included
    result['has_stats'] = {
        'id': 'has_stats',
        'label': 'Has Stats?',
        'value': len(result) > 0,
        'description': 'Indicates whether there are statistics for this table',
        'include': False,
    }
    return result
|
def now(self, when=None):
    """Set the current value to the correct tuple based on the seconds
    since the epoch. If 'when' is not provided, get the current time
    from the task manager."""
    if when is None:
        when = _TaskManager().get_time()
    t = time.localtime(when)
    # (year - 1900, month, day, 1-based weekday) per the date encoding.
    self.value = (t[0] - 1900, t[1], t[2], t[6] + 1)
    return self
|
def addReference(self, reference):
    """Adds the specified reference to this ReferenceSet."""
    refId = reference.getId()
    # Index by both global id and local name; keep insertion order in the
    # id list.
    self._referenceIdMap[refId] = reference
    self._referenceNameMap[reference.getLocalId()] = reference
    self._referenceIds.append(refId)
|
async def close_authenticator_async(self):
    """Close the CBS auth channel and session asynchronously."""
    _logger.info("Shutting down CBS session on connection: %r.",
                 self._connection.container_id)
    try:
        # Tear down the CBS auth handle first, then the AMQP session.
        self._cbs_auth.destroy()
        _logger.info("Auth closed, destroying session on connection: %r.",
                     self._connection.container_id)
        await self._session.destroy_async()
    finally:
        # Logged even if destroying the auth or session raised.
        _logger.info("Finished shutting down CBS session on connection: %r.",
                     self._connection.container_id)
|
def prep_itasser_modeling(self, itasser_installation, itlib_folder, runtype,
                          create_in_dir=None, execute_from_dir=None,
                          all_genes=False, print_exec=False, **kwargs):
    """Prepare to run I-TASSER homology modeling for genes without structures, or all genes.

    Args:
        itasser_installation (str): Path to I-TASSER folder, i.e. ``~/software/I-TASSER4.4``
        itlib_folder (str): Path to ITLIB folder, i.e. ``~/software/ITLIB``
        runtype: How you will be running I-TASSER - local, slurm, or torque
        create_in_dir (str): Local directory where folders will be created; if not
            provided, default is the GEM-PRO's ``data_dir``
        execute_from_dir (str): Optional path to execution directory - use this if you
            are copying the homology models to another location such as a
            supercomputer for running
        all_genes (bool): If all genes should be prepped, or only those without any
            mapped structures
        print_exec (bool): If the execution statement should be printed to run modelling

    Todo:
        * Document kwargs - extra options for I-TASSER, SLURM or Torque execution
        * Allow modeling of any sequence in sequences attribute, select by ID or
          provide SeqProp?
    """
    # Resolve where the per-gene modeling folders will be created.
    if create_in_dir:
        self.homology_models_dir = create_in_dir
    else:
        if not self.data_dir:
            raise ValueError('Output directory must be specified')
        self.homology_models_dir = op.join(self.data_dir, 'homology_models')
    ssbio.utils.make_dir(self.homology_models_dir)
    if not execute_from_dir:
        execute_from_dir = self.homology_models_dir
    prepped = 0
    for gene in self.genes_with_a_representative_sequence:
        # Skip genes that already have a representative structure unless
        # modeling was requested for all genes.
        if gene.protein.representative_structure and not all_genes:
            log.debug('{}: representative structure set, skipping homology modeling'.format(gene.id))
            continue
        gene.protein.prep_itasser_modeling(itasser_installation=itasser_installation,
                                           itlib_folder=itlib_folder,
                                           runtype=runtype,
                                           create_in_dir=self.homology_models_dir,
                                           execute_from_dir=execute_from_dir,
                                           print_exec=print_exec,
                                           **kwargs)
        prepped += 1
    log.info('Prepared I-TASSER modeling folders for {} genes in folder {}'.format(prepped, self.homology_models_dir))
|
def new_data(self, mem, addr, data):
    """Callback for when new memory data has been fetched"""
    if mem.id != self.id:
        # Data for a different memory object; ignore.
        return
    if addr == LocoMemory2.ADR_ID_LIST:
        self._handle_id_list_data(data)
    elif addr == LocoMemory2.ADR_ACTIVE_ID_LIST:
        self._handle_active_id_list_data(data)
    else:
        # Addresses beyond the anchor base map onto per-anchor pages.
        anchor_id = int((addr - LocoMemory2.ADR_ANCHOR_BASE) / LocoMemory2.ANCHOR_PAGE_SIZE)
        self._handle_anchor_data(anchor_id, data)
|
async def send_invoice(self,
                       chat_id: base.Integer,
                       title: base.String,
                       description: base.String,
                       payload: base.String,
                       provider_token: base.String,
                       start_parameter: base.String,
                       currency: base.String,
                       prices: typing.List[types.LabeledPrice],
                       provider_data: typing.Union[typing.Dict, None] = None,
                       photo_url: typing.Union[base.String, None] = None,
                       photo_size: typing.Union[base.Integer, None] = None,
                       photo_width: typing.Union[base.Integer, None] = None,
                       photo_height: typing.Union[base.Integer, None] = None,
                       need_name: typing.Union[base.Boolean, None] = None,
                       need_phone_number: typing.Union[base.Boolean, None] = None,
                       need_email: typing.Union[base.Boolean, None] = None,
                       need_shipping_address: typing.Union[base.Boolean, None] = None,
                       is_flexible: typing.Union[base.Boolean, None] = None,
                       disable_notification: typing.Union[base.Boolean, None] = None,
                       reply_to_message_id: typing.Union[base.Integer, None] = None,
                       reply_markup: typing.Union[types.InlineKeyboardMarkup, None] = None) -> types.Message:
    """Use this method to send invoices.

    Source: https://core.telegram.org/bots/api#sendinvoice

    :param chat_id: Unique identifier for the target private chat
    :type chat_id: :obj:`base.Integer`
    :param title: Product name, 1-32 characters
    :type title: :obj:`base.String`
    :param description: Product description, 1-255 characters
    :type description: :obj:`base.String`
    :param payload: Bot-defined invoice payload, 1-128 bytes
        This will not be displayed to the user, use for your internal processes.
    :type payload: :obj:`base.String`
    :param provider_token: Payments provider token, obtained via Botfather
    :type provider_token: :obj:`base.String`
    :param start_parameter: Unique deep-linking parameter that can be used to
        generate this invoice when used as a start parameter
    :type start_parameter: :obj:`base.String`
    :param currency: Three-letter ISO 4217 currency code, see more on currencies
    :type currency: :obj:`base.String`
    :param prices: Price breakdown, a list of components
        (e.g. product price, tax, discount, delivery cost, delivery tax, bonus, etc.)
    :type prices: :obj:`typing.List[types.LabeledPrice]`
    :param provider_data: JSON-encoded data about the invoice, which will be
        shared with the payment provider
    :type provider_data: :obj:`typing.Union[typing.Dict, None]`
    :param photo_url: URL of the product photo for the invoice
    :type photo_url: :obj:`typing.Union[base.String, None]`
    :param photo_size: Photo size
    :type photo_size: :obj:`typing.Union[base.Integer, None]`
    :param photo_width: Photo width
    :type photo_width: :obj:`typing.Union[base.Integer, None]`
    :param photo_height: Photo height
    :type photo_height: :obj:`typing.Union[base.Integer, None]`
    :param need_name: Pass True, if you require the user's full name to complete the order
    :type need_name: :obj:`typing.Union[base.Boolean, None]`
    :param need_phone_number: Pass True, if you require the user's phone number to complete the order
    :type need_phone_number: :obj:`typing.Union[base.Boolean, None]`
    :param need_email: Pass True, if you require the user's email to complete the order
    :type need_email: :obj:`typing.Union[base.Boolean, None]`
    :param need_shipping_address: Pass True, if you require the user's shipping address to complete the order
    :type need_shipping_address: :obj:`typing.Union[base.Boolean, None]`
    :param is_flexible: Pass True, if the final price depends on the shipping method
    :type is_flexible: :obj:`typing.Union[base.Boolean, None]`
    :param disable_notification: Sends the message silently. Users will receive
        a notification with no sound
    :type disable_notification: :obj:`typing.Union[base.Boolean, None]`
    :param reply_to_message_id: If the message is a reply, ID of the original message
    :type reply_to_message_id: :obj:`typing.Union[base.Integer, None]`
    :param reply_markup: A JSON-serialized object for an inline keyboard
        If empty, one 'Pay total price' button will be shown. If not empty,
        the first button must be a Pay button.
    :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup, None]`
    :return: On success, the sent Message is returned
    :rtype: :obj:`types.Message`
    """
    # NOTE: generate_payload(**locals()) below serialises *every* local
    # variable into the request payload -- do not introduce temporaries here.
    prices = prepare_arg([price.to_python() if hasattr(price, 'to_python') else price for price in prices])
    reply_markup = prepare_arg(reply_markup)
    payload_ = generate_payload(**locals())
    result = await self.request(api.Methods.SEND_INVOICE, payload_)
    return types.Message(**result)
|
def s3_cache_timeout(self):
    """The socket timeout in seconds for connections to Amazon S3 (an integer).

    This value is injected into Boto's configuration to override the
    default socket timeout used for connections to Amazon S3.

    - Environment variable: ``$PIP_ACCEL_S3_TIMEOUT``
    - Configuration option: ``s3-timeout``
    - Default: ``60`` (`Boto's default`_)

    .. _Boto's default: http://boto.readthedocs.org/en/latest/boto_config_tut.html
    """
    value = self.get(property_name='s3_cache_timeout',
                     environment_variable='PIP_ACCEL_S3_TIMEOUT',
                     configuration_option='s3-timeout')
    # BUG FIX: the original used a bare `except:` (which swallows even
    # KeyboardInterrupt) and silently returned None for negative values.
    # Unset, malformed and negative values now all fall back to the
    # documented default of 60 seconds.
    try:
        n = int(value)
    except (TypeError, ValueError):
        return 60
    return n if n >= 0 else 60
|
def compare(self, other, t_threshold=1e-3, r_threshold=1e-3):
    """Compare two transformations.

    The RMSD values of the rotation matrices and the translation vectors
    are computed. The return value is True when the RMSD values are below
    the thresholds, i.e. when the two transformations are almost identical.
    """
    # Check the translation first; skip the rotation RMSD if it already fails.
    if compute_rmsd(self.t, other.t) >= t_threshold:
        return False
    return compute_rmsd(self.r, other.r) < r_threshold
|
def status(self):
    """Returns modified, added, removed, deleted files for current changeset"""
    # Diff this changeset against its first parent.
    ctx = self._ctx
    return self.repository._repo.status(ctx.p1().node(), ctx.node())
|
def longitude(self, value):
    """Set longitude value in degrees."""
    # Stored internally in radians.
    self._longitude = math.radians(float(value))
    # Keep time_zone roughly consistent with the longitude (15 degrees per
    # hour); only update when it is off by more than one hour.
    expected_tz = value / 15.0
    if abs(expected_tz - self.time_zone) > 1:
        self.time_zone = expected_tz
|
def on_for_distance(self, speed, distance_mm, brake=True, block=True):
    """Drive in a straight line for ``distance_mm``.

    :param speed: speed applied to both motors.
    :param distance_mm: distance to travel, in millimetres.
    :param brake: passed through to MoveTank.on_for_rotations.
    :param block: passed through to MoveTank.on_for_rotations.
    """
    # Convert the requested distance into wheel rotations.
    rotations = distance_mm / self.wheel.circumference_mm
    log.debug("%s: on_for_rotations distance_mm %s, rotations %s, speed %s" % (self, distance_mm, rotations, speed))
    # Same speed on both sides -> straight travel.
    MoveTank.on_for_rotations(self, speed, speed, rotations, brake, block)
|
def gen_opt_str(ser_rec: pd.Series) -> str:
    '''generate rst option string

    Parameters
    ----------
    ser_rec : pd.Series
        record for specifications

    Returns
    -------
    str
        rst string
    '''
    indent = r' '
    # Directive header, then one field per spec in sorted order.
    pieces = [f'.. option:: {ser_rec.name}' + '\n\n']
    for spec in ser_rec.sort_index().index:
        pieces.append(indent + f':{spec}:' + '\n')
        pieces.append(indent * 2 + f'{ser_rec[spec]}' + '\n')
    return ''.join(pieces)
|
def create(configs):
    """Creates AndroidDevice controller objects.

    Args:
        configs: A list of dicts, each representing a configuration for an
            Android device.

    Returns:
        A list of AndroidDevice objects.
    """
    if not configs:
        raise Error(ANDROID_DEVICE_EMPTY_CONFIG_MSG)
    if configs == ANDROID_DEVICE_PICK_ALL_TOKEN:
        ads = get_all_instances()
    elif not isinstance(configs, list):
        raise Error(ANDROID_DEVICE_NOT_LIST_CONFIG_MSG)
    elif isinstance(configs[0], dict):
        # Configs is a list of dicts.
        ads = get_instances_with_configs(configs)
    elif isinstance(configs[0], basestring):
        # Configs is a list of strings representing serials.
        ads = get_instances(configs)
    else:
        raise Error('No valid config found in: %s' % configs)
    # Make sure every requested device is actually attached before starting
    # services on any of them.
    valid_ad_identifiers = list_adb_devices() + list_adb_devices_by_usb_id()
    for ad in ads:
        if ad.serial not in valid_ad_identifiers:
            raise DeviceError(ad, 'Android device is specified in config but'
                              ' is not attached.')
    _start_services_on_ads(ads)
    return ads
|
def loads(s, _dict=dict, decoder=None):
    """Parses string as toml

    Works in two passes: a character-level pass that blanks out comments and
    folds newlines inside strings/arrays in place, then a line-level pass
    that dispatches each surviving line to table-header, inline-object or
    key/value handling.

    Args:
        s: String to be parsed
        _dict: (optional) Specifies the class of the returned toml dictionary
        decoder: (optional) TomlDecoder used to build tables and parse values

    Returns:
        Parsed toml file represented as a dictionary

    Raises:
        TypeError: When a non-string is passed
        TomlDecodeError: Error while decoding toml
    """
    # Tables created implicitly (e.g. [a.b] creates "a") are remembered here
    # so a later explicit [a] is not rejected as a duplicate.
    implicitgroups = []
    if decoder is None:
        decoder = TomlDecoder(_dict)
    retval = decoder.get_empty_table()
    currentlevel = retval
    if not isinstance(s, basestring):
        raise TypeError("Expecting something like a string")
    if not isinstance(s, unicode):
        s = s.decode('utf8')
    # Keep the untouched input for error reporting positions.
    original = s
    # Mutable character buffer edited in place by the first pass.
    sl = list(s)
    openarr = 0           # depth of currently open array brackets
    openstring = False    # currently inside a quoted string?
    openstrchar = ""      # quote character that opened the current string
    multilinestr = False  # currently inside a triple-quoted string?
    arrayoftables = False
    beginline = True      # at the logical beginning of a line?
    keygroup = False      # inside a [table] header?
    dottedkey = False
    # Key-name state machine: 0 = not in a key, 1 = reading the key itself,
    # 2 = key finished, expecting '=' (or another dotted segment).
    keyname = 0
    # --- Pass 1: scan character by character ---
    for i, item in enumerate(sl):
        # Normalize CRLF to LF (blank the CR).
        if item == '\r' and sl[i + 1] == '\n':
            sl[i] = ' '
            continue
        if keyname:
            if item == '\n':
                raise TomlDecodeError("Key name found without value."
                                      " Reached end of line.", original, i)
            if openstring:
                # Inside a quoted key: only the matching quote ends it.
                if item == openstrchar:
                    keyname = 2
                    openstring = False
                    openstrchar = ""
                continue
            elif keyname == 1:
                if item.isspace():
                    keyname = 2
                    continue
                elif item == '.':
                    dottedkey = True
                    continue
                elif item.isalnum() or item == '_' or item == '-':
                    continue
                elif (dottedkey and sl[i - 1] == '.' and
                        (item == '"' or item == "'")):
                    # Quoted segment of a dotted key.
                    openstring = True
                    openstrchar = item
                    continue
            elif keyname == 2:
                if item.isspace():
                    if dottedkey:
                        nextitem = sl[i + 1]
                        if not nextitem.isspace() and nextitem != '.':
                            # Whitespace then a new segment: back to reading.
                            keyname = 1
                    continue
                if item == '.':
                    dottedkey = True
                    nextitem = sl[i + 1]
                    if not nextitem.isspace() and nextitem != '.':
                        keyname = 1
                    continue
            if item == '=':
                keyname = 0
                dottedkey = False
            else:
                raise TomlDecodeError("Found invalid character in key name: '" +
                                      item + "'. Try quoting the key name.",
                                      original, i)
        # Track single-quoted string state; look back for ''' (triple).
        if item == "'" and openstrchar != '"':
            k = 1
            try:
                while sl[i - k] == "'":
                    k += 1
                    if k == 3:
                        break
            except IndexError:
                pass
            if k == 3:
                multilinestr = not multilinestr
                openstring = multilinestr
            else:
                openstring = not openstring
            if openstring:
                openstrchar = "'"
            else:
                openstrchar = ""
        # Track double-quoted string state; honor backslash escapes and """.
        if item == '"' and openstrchar != "'":
            oddbackslash = False
            k = 1
            tripquote = False
            try:
                while sl[i - k] == '"':
                    k += 1
                    if k == 3:
                        tripquote = True
                        break
                if k == 1 or (k == 3 and tripquote):
                    # Count preceding backslashes: an odd number escapes the quote.
                    while sl[i - k] == '\\':
                        oddbackslash = not oddbackslash
                        k += 1
            except IndexError:
                pass
            if not oddbackslash:
                if tripquote:
                    multilinestr = not multilinestr
                    openstring = multilinestr
                else:
                    openstring = not openstring
            if openstring:
                openstrchar = '"'
            else:
                openstrchar = ""
        # Blank out comments up to end of line (outside strings/headers).
        if item == '#' and (not openstring and not keygroup and
                            not arrayoftables):
            j = i
            try:
                while sl[j] != '\n':
                    sl[j] = ' '
                    j += 1
            except IndexError:
                break
        # '[' opens a table header at line start, otherwise an array.
        if item == '[' and (not openstring and not keygroup and
                            not arrayoftables):
            if beginline:
                if len(sl) > i + 1 and sl[i + 1] == '[':
                    arrayoftables = True
                else:
                    keygroup = True
            else:
                openarr += 1
        if item == ']' and not openstring:
            if keygroup:
                keygroup = False
            elif arrayoftables:
                if sl[i - 1] == ']':
                    arrayoftables = False
            else:
                openarr -= 1
        if item == '\n':
            if openstring or multilinestr:
                if not multilinestr:
                    raise TomlDecodeError("Unbalanced quotes", original, i)
                # A multiline string ending right at the newline: move the
                # closing quote onto the newline position so the line split
                # in pass 2 keeps the string closed.
                if ((sl[i - 1] == "'" or sl[i - 1] == '"') and (
                        sl[i - 2] == sl[i - 1])):
                    sl[i] = sl[i - 1]
                    if sl[i - 3] == sl[i - 1]:
                        sl[i - 3] = ' '
            elif openarr:
                # Newlines inside arrays are folded into spaces.
                sl[i] = ' '
            else:
                beginline = True
        elif beginline and sl[i] != ' ' and sl[i] != '\t':
            beginline = False
            if not keygroup and not arrayoftables:
                if sl[i] == '=':
                    raise TomlDecodeError("Found empty keyname. ", original, i)
                keyname = 1
    # --- Pass 2: process line by line ---
    s = ''.join(sl)
    s = s.split('\n')
    multikey = None        # key whose multiline string value is being built
    multilinestr = ""      # accumulated multiline string text
    multibackslash = False # previous line ended with a line-continuation '\'
    pos = 0                # character offset of the current line (for errors)
    for idx, line in enumerate(s):
        if idx > 0:
            pos += len(s[idx - 1]) + 1
        if not multilinestr or multibackslash or '\n' not in multilinestr:
            line = line.strip()
        if line == "" and (not multikey or multibackslash):
            continue
        if multikey:
            # Continue accumulating a multiline string value.
            if multibackslash:
                multilinestr += line
            else:
                multilinestr += line
            multibackslash = False
            if len(line) > 2 and (line[-1] == multilinestr[0] and
                                  line[-2] == multilinestr[0] and
                                  line[-3] == multilinestr[0]):
                # Closing triple-quote reached: parse the whole value.
                try:
                    value, vtype = decoder.load_value(multilinestr)
                except ValueError as err:
                    raise TomlDecodeError(str(err), original, pos)
                currentlevel[multikey] = value
                multikey = None
                multilinestr = ""
            else:
                # Odd number of trailing backslashes = line continuation.
                k = len(multilinestr) - 1
                while k > -1 and multilinestr[k] == '\\':
                    multibackslash = not multibackslash
                    k -= 1
                if multibackslash:
                    multilinestr = multilinestr[:-1]
                else:
                    multilinestr += "\n"
            continue
        if line[0] == '[':
            # Table header: [group] or [[array.of.tables]].
            arrayoftables = False
            if len(line) == 1:
                raise TomlDecodeError("Opening key group bracket on line by "
                                      "itself.", original, pos)
            if line[1] == '[':
                arrayoftables = True
                line = line[2:]
                splitstr = ']]'
            else:
                line = line[1:]
                splitstr = ']'
            # Find the real closing bracket, skipping any inside quotes.
            i = 1
            quotesplits = decoder._get_split_on_quotes(line)
            quoted = False
            for quotesplit in quotesplits:
                if not quoted and splitstr in quotesplit:
                    break
                i += quotesplit.count(splitstr)
                quoted = not quoted
            line = line.split(splitstr, i)
            if len(line) < i + 1 or line[-1].strip() != "":
                raise TomlDecodeError("Key group not on a line by itself.",
                                      original, pos)
            groups = splitstr.join(line[:-1]).split('.')
            # Re-join dotted segments that belong to one quoted group name.
            i = 0
            while i < len(groups):
                groups[i] = groups[i].strip()
                if len(groups[i]) > 0 and (groups[i][0] == '"' or
                                           groups[i][0] == "'"):
                    groupstr = groups[i]
                    j = i + 1
                    while not groupstr[0] == groupstr[-1]:
                        j += 1
                        if j > len(groups) + 2:
                            raise TomlDecodeError("Invalid group name '" +
                                                  groupstr + "' Something " +
                                                  "went wrong.", original, pos)
                        groupstr = '.'.join(groups[i:j]).strip()
                    groups[i] = groupstr[1:-1]
                    groups[i + 1:j] = []
                else:
                    if not _groupname_re.match(groups[i]):
                        raise TomlDecodeError("Invalid group name '" +
                                              groups[i] + "'. Try quoting it.",
                                              original, pos)
                i += 1
            # Walk/create the nested tables for each dotted segment.
            currentlevel = retval
            for i in _range(len(groups)):
                group = groups[i]
                if group == "":
                    raise TomlDecodeError("Can't have a keygroup with an empty "
                                          "name", original, pos)
                try:
                    currentlevel[group]
                    if i == len(groups) - 1:
                        if group in implicitgroups:
                            implicitgroups.remove(group)
                            if arrayoftables:
                                raise TomlDecodeError("An implicitly defined "
                                                      "table can't be an array",
                                                      original, pos)
                        elif arrayoftables:
                            currentlevel[group].append(decoder.get_empty_table())
                        else:
                            raise TomlDecodeError("What? " + group +
                                                  " already exists?" +
                                                  str(currentlevel),
                                                  original, pos)
                except TypeError:
                    # currentlevel is a list (array of tables): descend into
                    # its most recent element.
                    currentlevel = currentlevel[-1]
                    if group not in currentlevel:
                        currentlevel[group] = decoder.get_empty_table()
                        if i == len(groups) - 1 and arrayoftables:
                            currentlevel[group] = [decoder.get_empty_table()]
                except KeyError:
                    # First sighting of this group: create it (implicitly if
                    # it is an intermediate segment).
                    if i != len(groups) - 1:
                        implicitgroups.append(group)
                    currentlevel[group] = decoder.get_empty_table()
                    if i == len(groups) - 1 and arrayoftables:
                        currentlevel[group] = [decoder.get_empty_table()]
                currentlevel = currentlevel[group]
                if arrayoftables:
                    try:
                        currentlevel = currentlevel[-1]
                    except KeyError:
                        pass
        elif line[0] == "{":
            # Inline table: must open and close on the same line.
            if line[-1] != "}":
                raise TomlDecodeError("Line breaks are not allowed in inline"
                                      "objects", original, pos)
            try:
                decoder.load_inline_object(line, currentlevel, multikey,
                                           multibackslash)
            except ValueError as err:
                raise TomlDecodeError(str(err), original, pos)
        elif "=" in line:
            # Plain key = value; may start a multiline string (ret not None).
            try:
                ret = decoder.load_line(line, currentlevel, multikey,
                                        multibackslash)
            except ValueError as err:
                raise TomlDecodeError(str(err), original, pos)
            if ret is not None:
                multikey, multilinestr, multibackslash = ret
    return retval
|
def cli(obj, ids, query, filters, attributes):
    """Update alert attributes."""
    client = obj['client']
    if ids:
        total = len(ids)
    else:
        # No explicit ids: resolve them from the query (or built filters).
        query = [('q', query)] if query else build_query(filters)
        total, _, _ = client.get_count(query)
        ids = [alert.id for alert in client.get_alerts(query)]
    label = 'Updating {} alerts'.format(total)
    with click.progressbar(ids, label=label) as bar:
        for alert_id in bar:
            client.update_attributes(
                alert_id, dict(kv.split('=') for kv in attributes))
|
def cancel_merge_when_pipeline_succeeds(self, **kwargs):
    """Cancel merge when the pipeline succeeds.

    Args:
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabMROnBuildSuccessError: If the server could not handle the
            request
    """
    url = '{0}/{1}/cancel_merge_when_pipeline_succeeds'.format(
        self.manager.path, self.get_id())
    # The server returns the updated MR attributes; mirror them locally.
    server_data = self.manager.gitlab.http_put(url, **kwargs)
    self._update_attrs(server_data)
|
def get_lines_from_file(filepath, encoding="UTF-8"):
    """Return the list of lines of *filepath*.

    The file is first decoded with *encoding*; if that fails with a
    UnicodeDecodeError the read is retried with 'ISO-8859-1', which accepts
    any byte sequence.

    :param filepath: path of the file to read
    :param encoding: preferred text encoding (default UTF-8)
    :returns: list of lines (including line terminators)
    """
    # The original closed ``fd`` in a ``finally`` block, which raised a
    # NameError when open() itself failed and leaked the first handle when
    # the decode fallback kicked in; context managers fix both.
    try:
        with codecs.open(filepath, 'r', encoding) as fd:
            return fd.readlines()
    except UnicodeDecodeError:
        # Fall back to 'ISO-8859-1'
        with codecs.open(filepath, 'r', 'ISO-8859-1') as fd:
            return fd.readlines()
|
def _pool(self, pool_name, pool_function, k_height, k_width, d_height,
          d_width, mode, input_layer, num_channels_in):
    """Construct a pooling layer."""
    if input_layer is None:
        input_layer = self.top_layer
    else:
        self.top_size = num_channels_in
    layer_name = pool_name + str(self.counts[pool_name])
    self.counts[pool_name] += 1
    if self.use_tf_layers:
        # tf.layers-style pooling takes (kernel, stride) pairs directly.
        pool = pool_function(
            input_layer,
            [k_height, k_width],
            [d_height, d_width],
            padding=mode,
            data_format=self.channel_pos,
            name=layer_name)
    else:
        # Raw tf.nn.max_pool needs 4-D ksize/strides matching data_format.
        if self.data_format == "NHWC":
            ksize = [1, k_height, k_width, 1]
            strides = [1, d_height, d_width, 1]
        else:
            ksize = [1, 1, k_height, k_width]
            strides = [1, 1, d_height, d_width]
        pool = tf.nn.max_pool(input_layer, ksize, strides, padding=mode,
                              data_format=self.data_format, name=layer_name)
    self.top_layer = pool
    return pool
|
def _string2Duration(text):
    """CONVERT SIMPLE <float><type> TO A DURATION OBJECT

    Accepts whole or fractional amounts (e.g. "3hour", "1.5day"); a missing
    amount defaults to 1.
    """
    if text == "" or text == "zero":
        return ZERO
    amount, interval = re.match(r"([\d\.]*)(.*)", text).groups()
    # The regex admits fractional amounts, so int() alone would raise on
    # inputs like "1.5hour"; keep ints for whole numbers for compatibility.
    if not amount:
        amount = 1
    elif "." in amount:
        amount = float(amount)
    else:
        amount = int(amount)
    # NOTE: "== None" (not "is None") is deliberate — MILLI_VALUES may yield
    # a mo-dots Null, which compares equal to None but is not None.
    if MILLI_VALUES[interval] == None:
        from mo_logs import Log
        Log.error(
            "{{interval|quote}} in {{text|quote}} is not a recognized duration type (did you use the plural form by mistake?)",
            interval=interval,
            text=text,
        )
    output = Duration(0)
    if MONTH_VALUES[interval] == 0:
        output.milli = amount * MILLI_VALUES[interval]
    else:
        # Month-like intervals carry both a millisecond and a month component.
        output.milli = amount * MONTH_VALUES[interval] * MILLI_VALUES.month
        output.month = amount * MONTH_VALUES[interval]
    return output
|
def onStart(self, event):
    """Display the environment of a started container.

    :param event: docker event object exposing the started container
    """
    c = event.container
    # Python-2 print statements were a syntax error under the Python 3
    # features (f-strings) used elsewhere in this file; use print().
    print('+' * 5, 'started:', c)
    # Env entries look like "KEY=VALUE"; split only on the first '='.
    env = dict(entry.split('=', 1) for entry in c.attrs['Config']['Env'])
    print(env)
|
def resolve_kw(self, kwargs):
    """Resolve :kwargs: like `story_id: 1` to the form of `id: 1`.

    Keys without an underscore are kept as-is; for the rest, everything up
    to and including the first underscore is stripped.
    """
    # split('_', 1)[-1] is the original key when there is no underscore,
    # and the part after the first underscore otherwise.
    return {key.split('_', 1)[-1]: value for key, value in kwargs.items()}
|
def pyquil_to_circuit(program: pyquil.Program) -> Circuit:
    """Convert a protoquil pyQuil program to a QuantumFlow Circuit"""
    circ = Circuit()
    for inst in program.instructions:
        if isinstance(inst, (pyquil.Declare, pyquil.Halt, pyquil.Pragma)):
            # Declarations, halts and pragmas carry no gate semantics.
            # TODO: treat Pragma as a barrier?
            continue
        if isinstance(inst, pyquil.Measurement):
            circ += Measure(inst.qubit.index)
        # elif isinstance(inst, pyquil.ResetQubit):  # TODO
        #     continue
        elif isinstance(inst, pyquil.Gate):
            gate_cls = STDGATES[inst.name]
            wires = [q.index for q in inst.qubits]
            circ += gate_cls(*inst.params).relabel(wires)
        else:
            raise ValueError('PyQuil program is not protoquil')
    return circ
|
def end_address(self):
    """Get basic block end address, or ``None`` for an empty block.

    The end address is the address of the last byte of the final
    instruction (inclusive).
    """
    # The original tested ``self._instrs is []``, which is always False
    # (identity against a fresh list), so empty blocks crashed with
    # IndexError instead of returning None.
    if not self._instrs:
        return None
    last = self._instrs[-1]
    return last.address + last.size - 1
|
def group(self, meta=None, meta_aggregates=None, regs=None,
          regs_aggregates=None, meta_group_name="_group"):
    """*Wrapper of* ``GROUP``

    The GROUP operator is used for grouping both regions and/or metadata of input
    dataset samples according to distinct values of certain attributes (known as grouping
    attributes); new grouping attributes are added to samples in the output dataset,
    storing the results of aggregate function evaluations over metadata and/or regions
    in each group of samples.

    Samples having missing values for any of the grouping attributes are discarded.

    :param meta: (optional) a list of metadata attributes
    :param meta_aggregates: (optional) {'new_attr': fun}
    :param regs: (optional) a list of region fields
    :param regs_aggregates: {'new_attr': fun}
    :param meta_group_name: (optional) the name to give to the group attribute in the
                            metadata
    :return: a new GMQLDataset

    Example of usage. We group samples by `antibody` and we aggregate the region pvalues taking the maximum value
    calling the new region field `maxPvalue`::

        import gmql as gl
        d1 = gl.get_example_dataset("Example_Dataset_1")
        result = d1.group(meta=['antibody'], regs_aggregates={'maxPvalue': gl.MAX("pvalue")})
    """
    # Each optional argument is validated, then wrapped in the Scala-side
    # Option type (Some/none) expected by the backend operator manager.
    if isinstance(meta, list) and all([isinstance(x, str) for x in meta]):
        meta = Some(meta)
    elif meta is None:
        meta = none()
    else:
        raise TypeError("meta must be a list of strings. "
                        "{} was provided".format(type(meta)))
    expBuild = self.pmg.getNewExpressionBuilder(self.__index)
    # Convert {'name': AggregateFn} into backend meta-aggregate expressions.
    if isinstance(meta_aggregates, dict):
        metaAggregates = []
        for k in meta_aggregates:
            if isinstance(k, str):
                item = meta_aggregates[k]
                if isinstance(item, (SUM, MIN, MAX, AVG, BAG,
                                     BAGD, STD, MEDIAN, COUNTSAMP)):
                    functionName = item.get_aggregate_name()
                    argument = item.get_argument()
                    # COUNT-style aggregates have no argument field.
                    if argument is None:
                        argument = none()
                    else:
                        argument = Some(argument)
                    metaAggregates.append(expBuild.createMetaAggregateFunction(
                        functionName, k, argument))
                else:
                    # NOTE(review): the accepted types above include MEDIAN,
                    # but this message omits it — likely stale.
                    raise TypeError("the item of the dictionary must be an Aggregate of the following: "
                                    "SUM, MIN, MAX, AVG, BAG, BAGD, STD, COUNTSAMP. "
                                    "{} was provided".format(type(item)))
            else:
                raise TypeError("keys of meta_aggregates must be string. "
                                "{} was provided".format(type(k)))
        metaAggregates = Some(metaAggregates)
    elif meta_aggregates is None:
        metaAggregates = none()
    else:
        raise TypeError("meta_aggregates must be a dictionary of Aggregate functions. "
                        "{} was provided".format(type(meta_aggregates)))
    if isinstance(regs, list) and all([isinstance(x, str) for x in regs]):
        regs = Some(regs)
    elif regs is None:
        regs = none()
    else:
        raise TypeError("regs must be a list of strings. "
                        "{} was provided".format(type(regs)))
    # Same conversion for region aggregates ({'name': AggregateFn}).
    if isinstance(regs_aggregates, dict):
        regionAggregates = []
        for k in regs_aggregates.keys():
            if isinstance(k, str):
                item = regs_aggregates[k]
                if isinstance(item, (SUM, MIN, MAX, AVG, BAG,
                                     BAGD, MEDIAN, COUNT)):
                    op_name = item.get_aggregate_name()
                    op_argument = item.get_argument()
                    if op_argument is None:
                        op_argument = none()
                    else:
                        op_argument = Some(op_argument)
                    regsToReg = expBuild.getRegionsToRegion(op_name, k, op_argument)
                    regionAggregates.append(regsToReg)
                else:
                    raise TypeError("the item of the dictionary must be an Aggregate of the following: "
                                    "SUM, MIN, MAX, AVG, BAG, BAGD, MEDIAN, COUNT. "
                                    "{} was provided".format(type(item)))
            else:
                # NOTE(review): message says "new_reg_fields" but this
                # parameter is regs_aggregates — likely copy/paste.
                raise TypeError("The key of new_reg_fields must be a string. "
                                "{} was provided".format(type(k)))
        regionAggregates = Some(regionAggregates)
    elif regs_aggregates is None:
        regionAggregates = none()
    else:
        raise TypeError("new_reg_fields must be a list of dictionary. "
                        "{} was provided".format(type(regs_aggregates)))
    if isinstance(meta_group_name, str):
        pass
    else:
        raise TypeError("meta_group_name must be a string. "
                        "{} was provided".format(type(meta_group_name)))
    # Delegate to the backend GROUP operator and wrap the resulting index.
    new_index = self.opmng.group(self.__index, meta, metaAggregates,
                                 meta_group_name, regs, regionAggregates)
    return GMQLDataset(index=new_index, location=self.location,
                       local_sources=self._local_sources,
                       remote_sources=self._remote_sources,
                       meta_profile=self.meta_profile)
|
def _generate_autoscaling_metadata(self, cls, args):
    """Provides special handling for the autoscaling.Metadata object."""
    assert isinstance(args, Mapping)
    # Build the cloud-init config first, then wrap it in the Init object.
    config_args = args['AWS::CloudFormation::Init']['config']
    init_config = self._create_instance(cloudformation.InitConfig, config_args)
    init = self._create_instance(cloudformation.Init, {'config': init_config})
    auth = None
    if 'AWS::CloudFormation::Authentication' in args:
        auth_args = args['AWS::CloudFormation::Authentication']
        # One AuthenticationBlock per named entry, keyed by its name.
        auth_blocks = {
            name: self._create_instance(
                cloudformation.AuthenticationBlock, auth_args[name], name)
            for name in auth_args
        }
        auth = self._create_instance(cloudformation.Authentication, auth_blocks)
    return cls(init, auth)
|
def toc(self):
    """Returns a rich list of texts in the catalog, one summary line per
    entry, sorted by P-number key."""
    return [
        "Pnum: {key}, Edition: {edition}, length: {length} line(s)".format(
            key=key,
            edition=self.catalog[key]['edition'],
            length=len(self.catalog[key]['transliteration']),
        )
        for key in sorted(self.catalog.keys())
    ]
|
def get_shelveset_work_items(self, shelveset_id):
    """GetShelvesetWorkItems.

    Get work items associated with a shelveset.

    :param str shelveset_id: Shelveset's unique ID
    :rtype: [AssociatedWorkItem]
    """
    # Only send the query parameter when an id was actually supplied.
    query_parameters = (
        {'shelvesetId': self._serialize.query('shelveset_id', shelveset_id, 'str')}
        if shelveset_id is not None
        else {}
    )
    response = self._send(http_method='GET',
                          location_id='a7a0c1c1-373e-425a-b031-a519474d743d',
                          version='5.0',
                          query_parameters=query_parameters)
    return self._deserialize('[AssociatedWorkItem]',
                             self._unwrap_collection(response))
|
def set_working_directory(self, dirname):
    """Set current working directory.

    In the workingdirectory and explorer plugins. A falsy ``dirname`` is a
    no-op.
    """
    if not dirname:
        return
    self.main.workingdirectory.chdir(dirname, refresh_explorer=True,
                                     refresh_console=False)
|
def stations_iter(self, *, page_size=250):
    """Get a paged iterator of library stations.

    Parameters:
        page_size (int, Optional): The maximum number of results per returned page.
            Max allowed is ``49995``.
            Default: ``250``

    Yields:
        list: Station dicts.
    """
    next_token = None
    while True:
        response = self._call(mc_calls.RadioStation, max_results=page_size,
                              start_token=next_token)
        yield response.body.get('data', {}).get('items', [])
        # A missing nextPageToken means the last page was reached.
        next_token = response.body.get('nextPageToken')
        if next_token is None:
            return
|
def GetForwardedIps(self, interface, interface_ip=None):
    """Retrieve the list of configured forwarded IP addresses.

    Args:
        interface: string, the output device to query.
        interface_ip: string, current interface ip address.

    Returns:
        list, the IP address strings.
    """
    try:
        addrs = netifaces.ifaddresses(interface)
        addrs = addrs[netifaces.AF_INET]
    except (ValueError, IndexError):
        return []
    forwarded_ips = []
    for addr in addrs:
        # The interface's own primary address is not a forwarded IP.
        if addr['addr'] == interface_ip:
            continue
        prefix_len = netaddr.IPAddress(addr['netmask']).netmask_bits()
        forwarded_ips.append('%s/%d' % (addr['addr'], prefix_len))
    return self.ParseForwardedIps(forwarded_ips)
|
def _DiscoverElementTypeFromLocalname(self, type_localname):
    """Searches all namespaces for a type by name.

    Args:
        type_localname: The name of the type.

    Returns:
        A fully qualified SOAP type with the specified name.

    Raises:
        A zeep.exceptions.LookupError if the type cannot be found in any
        namespace.
    """
    found_type = None
    last_exception = None
    for prefix in self.zeep_client.wsdl.types.prefix_map.values():
        qname = '{%s}%s' % (prefix, type_localname)
        try:
            found_type = self.zeep_client.get_type(qname)
        except zeep.exceptions.LookupError as lookup_err:
            # Remember the failure and keep searching other namespaces.
            last_exception = lookup_err
            continue
        break
    if not found_type:
        # Not found anywhere: surface the most recent lookup failure.
        raise last_exception
    return found_type
|
def do(self, resource, method, params=None, data=None, json=None, headers=None):
    """Does the request job

    Args:
        resource (str): resource uri (relative path)
        method (str): HTTP method
        params (dict): uri queries
        data (dict): HTTP body (form)
        json (dict): HTTP body (json)
        headers (dict): HTTP headers

    Returns:
        RTMResponse
    """
    uri = "{0}/{1}".format(self._api_base, resource)
    # Copy the caller's dict before injecting the auth token; the original
    # mutated the argument in place, leaking 'token' back to the caller.
    params = dict(params) if params else {}
    params['token'] = self._token
    req = Request(method=method, url=uri, params=params, headers=headers,
                  data=data, json=json)
    s = Session()
    prepped = s.prepare_request(req)
    resp = s.send(prepped)
    return RTMResponse(resp)
|
def _header ( self , message : str , with_gap = True ) :
"""Logs a special ( colored ) message at the indent level ."""
|
if with_gap :
self . _line_break ( )
self . _log ( self . _set_color ( message , self . YELLOW ) )
|
def insecure_channel(target, options=None, *, loop=None, executor=None,
                     standalone_pool_for_streaming=False):
    """Creates an insecure Channel to a server.

    Args:
        target: The server address
        options: An optional list of key-value pairs (channel args in gRPC runtime)
            to configure the channel.

    Returns:
        A Channel object.
    """
    # Wrap the synchronous gRPC channel in the asyncio-aware Channel.
    raw_channel = _grpc.insecure_channel(target, options)
    return Channel(raw_channel, loop, executor, standalone_pool_for_streaming)
|
def _update_stage(awsclient, api_id, stage_name, method_settings):
    """Helper to apply method_settings to stage.

    :param awsclient: wrapper exposing ``get_client``
    :param api_id: REST API id
    :param stage_name: stage to patch
    :param method_settings: settings converted into patch operations
    :return: the ``update_stage`` response, or None when there was nothing
        to patch (previously the response was assigned but unused)
    """
    # settings docs in response:
    # https://botocore.readthedocs.io/en/latest/reference/services/apigateway.html#APIGateway.Client.update_stage
    client_api = awsclient.get_client('apigateway')
    operations = _convert_method_settings_into_operations(method_settings)
    if not operations:
        return None
    print('update method settings for stage')
    _sleep()
    return client_api.update_stage(restApiId=api_id, stageName=stage_name,
                                   patchOperations=operations)
|
def _infer_decorator_callchain(node):
    """Detect decorator call chaining and see if the end result is a
    static or a classmethod.

    Returns "classmethod", "staticmethod" or None.
    """
    if not isinstance(node, FunctionDef) or not node.parent:
        return None
    try:
        inferred = next(node.infer_call_result(node.parent))
    except exceptions.InferenceError:
        return None
    if isinstance(inferred, bases.Instance):
        # Inspect the class the instance proxies.
        inferred = inferred._proxied
    if not isinstance(inferred, ClassDef):
        return None
    for method_kind in ("classmethod", "staticmethod"):
        if inferred.is_subtype_of("%s.%s" % (BUILTINS, method_kind)):
            return method_kind
    return None
|
def id_nameDAVID(df, GTF=None, name_id=None):
    """Given a DAVIDenrich output it converts ensembl gene ids to gene names
    and adds this column to the output.

    :param df: a dataframe output from DAVIDenrich
    :param GTF: a GTF dataframe from readGTF()
    :param name_id: instead of a gtf dataframe a dataframe with the columns
        'gene_name' and 'gene_id' can be given as input
    :returns: a pandas dataframe with a gene name column added to it.
    """
    if name_id is None:
        # Build the id->name mapping from the GTF attributes.
        mapping = pd.concat(
            [retrieve_GTF_field('gene_name', GTF),
             retrieve_GTF_field('gene_id', GTF)],
            axis=1)
    else:
        mapping = name_id.copy()
    # NOTE(review): this writes a placeholder column into the caller's
    # dataframe in place — confirm the mutation is intended.
    df['Gene_names'] = "genes"
    annotated = pd.DataFrame()
    for term in df['termName'].tolist():
        block = df[df['termName'] == term].reset_index(drop=True)
        # geneIds is a single comma-separated string on the first row.
        id_frame = pd.DataFrame(data=block.xs(0)['geneIds'].split(", "))
        id_frame.columns = ['geneIds']
        id_frame['geneIds'] = id_frame['geneIds'].map(str.lower)
        # Lower-case the mapping ids so the join is case-insensitive.
        mapping['gene_id'] = mapping['gene_id'].astype(str)
        mapping['gene_id'] = mapping['gene_id'].map(str.lower)
        merged = pd.merge(id_frame, mapping, how='left',
                          left_on='geneIds', right_on='gene_id')
        block["Gene_names"] = ', '.join(merged['gene_name'].tolist())
        annotated = pd.concat([annotated, block])
    annotated = annotated.reset_index(drop=True)
    # Move Gene_names to sit immediately after the geneIds column.
    name_col = annotated[['Gene_names']]
    gpos = annotated.columns.get_loc("geneIds")
    annotated = annotated.drop(['Gene_names'], axis=1)
    cols = annotated.columns.tolist()
    return pd.concat(
        [annotated[cols[:gpos + 1]], name_col, annotated[cols[gpos + 1:]]],
        axis=1)
|
def cat_acc(y, z):
    """Classification accuracy for multi-categorical case."""
    weights = _cat_sample_weights(y)
    # 1.0 where the predicted class matches the true class, else 0.0.
    matches = K.cast(K.equal(K.argmax(y, axis=-1), K.argmax(z, axis=-1)),
                     K.floatx())
    return K.sum(matches * weights) / K.sum(weights)
|
def get_captcha_key(self, captcha_image_url):
    """Read CAPTCHA key from user input."""
    if not self.interactive:
        raise VkAuthError('Captcha is required. Use interactive mode to enter it '
                          'manually')
    print('Open CAPTCHA image url in your browser and enter it below: ',
          captcha_image_url)
    return raw_input('Enter CAPTCHA key: ')
|
def _handle_post(self, transaction):
    """Handle POST requests.

    :type transaction: Transaction
    :param transaction: the transaction that owns the request
    :rtype: Transaction
    :return: the edited transaction with the response to the request
    """
    resource_path = str("/" + transaction.request.uri_path)
    # Prepare an empty response addressed back to the requester.
    response = Response()
    response.destination = transaction.request.source
    response.token = transaction.request.token
    transaction.response = response
    # Create request
    return self._server.resourceLayer.create_resource(resource_path, transaction)
|
def handle_typical_memberdefs(self, signature, memberdef_nodes):
    """Produces docstring entries containing an "Overloaded function"
    section with the documentation for each overload, if the function is
    overloaded and self.with_overloaded_functions is set. Else, produce
    normal documentation.
    """
    overloaded = len(memberdef_nodes) > 1 and self.with_overloaded_functions
    if not overloaded:
        self.handle_typical_memberdefs_no_overload(signature, memberdef_nodes)
        return
    self.add_text(['\n', '%feature("docstring") ', signature, ' "', '\n'])
    if self.with_function_signature:
        # List every overload signature up front.
        for node in memberdef_nodes:
            self.add_line_with_subsequent_indent(self.get_function_signature(node))
        self.add_text('\n')
    self.add_text(['Overloaded function', '\n', '-------------------'])
    for node in memberdef_nodes:
        self.add_text('\n')
        self.add_line_with_subsequent_indent('* ' + self.get_function_signature(node))
        self.subnode_parse(node, pieces=[], indent=4,
                           ignore=['definition', 'name'])
    self.add_text(['";', '\n'])
|
def owns_endpoint(self, endpoint):
    """Tests if an endpoint name (not path) belongs to this Api. Takes
    in to account the Blueprint name part of the endpoint name.

    :param endpoint: The name of the endpoint being checked
    :return: bool
    """
    if self.blueprint:
        prefix = self.blueprint.name
        if not endpoint.startswith(prefix):
            return False
        # Strip the "<blueprint>." prefix before the membership test.
        endpoint = endpoint.split(prefix + '.', 1)[-1]
    return endpoint in self.endpoints
|
def reject_sv(m, s, y):
    """Sample from N(m, s^2) times SV likelihood using rejection.

    SV likelihood (in x) corresponds to y ~ N(0, exp(x)).
    """
    # Proposal mean from a second-order expansion of the log-likelihood.
    mp = m + 0.5 * s ** 2 * (-1. + y ** 2 * np.exp(-m))
    attempts = 0
    while True:
        attempts += 1
        x = stats.norm.rvs(loc=mp, scale=s)
        u = stats.uniform.rvs()
        log_ratio = -0.5 * y ** 2 * (np.exp(-x) - np.exp(-m) * (1. + m - x))
        if np.log(u) < log_ratio:
            break
        if attempts > 1000:
            # Give up after too many rejections; return the last draw.
            print('1000 failed attempt, m,s,y=%f, %f, %f' % (m, s, y))
            break
    return x
|
def enabled(name, **kwargs):
    '''Check to see if the named service is enabled to start on boot

    CLI Example:

    .. code-block:: bash

        salt '*' service.enabled <service name>
    '''
    # Dispatch on the init system managing this service; None when neither
    # upstart nor sysv knows about it.
    if _service_is_upstart(name):
        return _upstart_is_enabled(name)
    if _service_is_sysv(name):
        return _sysv_is_enabled(name)
    return None
|
def meta_to_indexes(meta, table_name=None, model_name=None):
    """Find all the indexes (primary keys) based on the meta data.

    :param meta: mapping of model name -> model meta dict
    :param table_name: optional filter on the model's ``db_table``
    :param model_name: optional filter on the model name
    :returns: list of ('Model.field', field_infodict, score) tuples
    """
    # (The original initialized ``indexes, pk_field = {}, None`` and then
    # immediately rebound ``indexes``; both assignments were dead code.)
    indexes = []
    for meta_model_name, model_meta in meta.iteritems():
        # When a filter is given, skip models matching neither the table
        # name nor the model name.
        if (table_name or model_name) and not (
                table_name == model_meta['Meta'].get('db_table', '') or
                model_name == meta_model_name):
            continue
        field_name, field_infodict, score = find_index(model_meta)
        indexes.append(('%s.%s' % (meta_model_name, field_name),
                        field_infodict, score))
    return indexes
|
def add_copy_spec(self, copyspecs, sizelimit=None, tailit=True, pred=None):
    """Add a file or glob but limit it to sizelimit megabytes. If fname is
    a single file the file will be tailed to meet sizelimit. If the first
    file in a glob is too large it will be tailed to meet the sizelimit.

    :param copyspecs: a path/glob string or list of them
    :param sizelimit: per-spec size cap in MB (plugin "log_size" option
        when None; disabled entirely when "all_logs" is set)
    :param tailit: when the limit is hit, collect a tail of the file that
        crossed it instead of dropping it outright
    :param pred: predicate gating whether this spec is collected at all
    """
    # Predicate says no: record why and collect nothing.
    if not self.test_predicate(pred=pred):
        self._log_info("skipped copy spec '%s' due to predicate (%s)" %
                       (copyspecs, self.get_predicate(pred=pred)))
        return
    if sizelimit is None:
        sizelimit = self.get_option("log_size")
    if self.get_option('all_logs'):
        # "all_logs" overrides any size cap.
        sizelimit = None
    if sizelimit:
        sizelimit *= 1024 * 1024  # in MB
    if not copyspecs:
        return False
    if isinstance(copyspecs, six.string_types):
        copyspecs = [copyspecs]
    for copyspec in copyspecs:
        if not (copyspec and len(copyspec)):
            # NOTE(review): returns False mid-iteration, abandoning any
            # remaining specs — confirm intended.
            return False
        if self.use_sysroot():
            copyspec = self.join_sysroot(copyspec)
        files = self._expand_copy_spec(copyspec)
        if len(files) == 0:
            continue
        # Files should be sorted in most-recently-modified order, so that
        # we collect the newest data first before reaching the limit.

        def getmtime(path):
            try:
                return os.path.getmtime(path)
            except OSError:
                # Unstattable files sort last.
                return 0

        files.sort(key=getmtime, reverse=True)
        current_size = 0
        limit_reached = False
        _file = None
        for _file in files:
            if self._is_forbidden_path(_file):
                self._log_debug("skipping forbidden path '%s'" % _file)
                continue
            try:
                current_size += os.stat(_file)[stat.ST_SIZE]
            except OSError:
                # Size unknown; treat as zero and keep going.
                self._log_info("failed to stat '%s'" % _file)
            if sizelimit and current_size > sizelimit:
                # The file that crossed the cap is NOT copied whole; it may
                # be tailed below.
                limit_reached = True
                break
            self._add_copy_paths([_file])
        if limit_reached and tailit and not _file_is_compressed(_file):
            # Store a tail of the over-limit file under sos_strings and
            # symlink it back to the original path in the archive.
            file_name = _file
            if file_name[0] == os.sep:
                file_name = file_name.lstrip(os.sep)
            strfile = file_name.replace(os.path.sep, ".") + ".tailed"
            self.add_string_as_file(tail(_file, sizelimit), strfile)
            rel_path = os.path.relpath('/', os.path.dirname(_file))
            link_path = os.path.join(rel_path, 'sos_strings',
                                     self.name(), strfile)
            self.archive.add_link(link_path, _file)
|
def unregisterSdc(self, sdcObj):
    """Unregister an SDC from the MDM/SIO cluster.

    :param sdcObj: ScaleIO SDC object
    :return: POST request response
    :rtype: Requests POST response object
    """
    # TODO: unmap any volumes still mapped to this SDC before removing it
    connection = self.conn.connection
    connection._check_login()
    url = "{}/{}{}/{}".format(
        connection._api_url, "instances/Sdc::", sdcObj.id, 'action/removeSdc')
    return connection._do_post(url)
def render(json_data, saltenv='base', sls='', **kws):
    '''Accepts JSON as a string or as a file object and runs it through the JSON
    parser.

    :rtype: A Python data structure
    '''
    # A file-like object was passed in; read its contents first.
    if not isinstance(json_data, six.string_types):
        json_data = json_data.read()

    # Strip a leading shebang line (e.g. "#!json") if present.
    if json_data.startswith('#!'):
        json_data = json_data[json_data.find('\n') + 1:]

    return json.loads(json_data) if json_data.strip() else {}
def lchop(string, prefix):
    """Removes a prefix from string

    :param string: String, possibly prefixed with prefix
    :param prefix: Prefix to remove from string
    :returns: string without the prefix
    """
    return string[len(prefix):] if string.startswith(prefix) else string
def write_def_finish(self, node, buffered, filtered, cached, callstack=True):
    """write the end section of a rendering function, either outermost or
    inline.

    this takes into account if the rendering function was filtered,
    buffered, etc. and closes the corresponding try: block if any, and
    writes code to retrieve captured content, apply filters, send proper
    return value.

    :param node: AST node being generated; supplies filter args when filtered.
    :param buffered: True if the function captures its output in a buffer.
    :param filtered: True if output must pass through filter callables.
    :param cached: True if the function participates in caching.
    :param callstack: if True, also emit caller_stack frame-pop cleanup.
    """
    # Plain case: nothing captured, just return '' and (optionally) pop the
    # caller stack frame inside a finally: block.
    if not buffered and not cached and not filtered:
        self.printer.writeline("return ''")
        if callstack:
            self.printer.writelines("finally:", "context.caller_stack._pop_frame()", None)
    if buffered or filtered or cached:
        if buffered or cached:
            # in a caching scenario, don't try to get a writer
            # from the context after popping; assume the caching
            # implementation might be using a context with no
            # extra buffers
            self.printer.writelines("finally:", "__M_buf = context._pop_buffer()")
        else:
            self.printer.writelines("finally:", "__M_buf, __M_writer = context._pop_buffer_and_writer()")
        if callstack:
            self.printer.writeline("context.caller_stack._pop_frame()")
        # Expression that retrieves the captured content from the buffer.
        s = "__M_buf.getvalue()"
        if filtered:
            s = self.create_filter_callable(node.filter_args.args, s, False)
        # Close (dedent out of) the finally: block before the return/write.
        self.printer.writeline(None)
        if buffered and not cached:
            # Buffer filters apply only when not caching; cached content is
            # presumably filtered on retrieval instead — NOTE(review): confirm.
            s = self.create_filter_callable(self.compiler.buffer_filters, s, False)
        if buffered or cached:
            self.printer.writeline("return %s" % s)
        else:
            # Filtered but unbuffered: write the result through, return ''.
            self.printer.writelines("__M_writer(%s)" % s, "return ''")
def AddDirectory(self, path):
    """Adds a directory to the fake file system.

    Note that this function will create parent directories if needed.

    Args:
      path (str): path of the directory within the fake file system.

    Raises:
      ValueError: if the path is already set.
    """
    if not self.file_system.FileEntryExistsByPath(path):
        # Ensure all ancestors exist before adding the directory itself.
        self._AddParentDirectories(path)
        self.file_system.AddFileEntry(
            path, file_entry_type=definitions.FILE_ENTRY_TYPE_DIRECTORY)
        return
    raise ValueError('Path: {0:s} already set.'.format(path))
def instantiate_from_config(cfg):
    """Instantiate data types from config.

    :param cfg: iterable of config dicts, each carrying a "type" key that
        names the data type to register.
    :raises KeyError: if a data type with the same name is already registered.
    """
    for h in cfg:
        type_name = h.get("type")
        if type_name in data_types:
            # BUGFIX: report the conflicting type *name*; the original
            # interpolated the whole config dict `h` into the message.
            raise KeyError("Data type '%s' already exists" % type_name)
        data_types[type_name] = DataType(h)
def get_max_capvol(self, remove=True, insert=True, volume=None):
    """Give max capacity in mAh/cc for inserting and removing a charged cation
    into the base structure.

    Args:
        remove: (bool) whether to allow cation removal
        insert: (bool) whether to allow cation insertion
        volume: (float) volume to use for normalization
            (default=volume of initial structure)

    Returns:
        max vol capacity in mAh/cc
    """
    # Fall back to the oxidized structure's volume when none (or 0) is given.
    norm_volume = volume or self.struc_oxid.volume
    capacity_ah = self._get_max_cap_ah(remove, insert)
    # Ah -> mAh (x1000) and A^3 -> cc (x1e24), per mole via Avogadro's number.
    return capacity_ah * 1000 * 1E24 / (norm_volume * const.N_A)
def asd(M1, M2):
    """Compute a Fourier-transform-based distance between two matrices.

    Inspired from Galiez et al., 2015
    (https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4535829/)
    """
    from scipy.fftpack import fft2
    # Compare amplitude spectra, which discards phase information.
    amplitude1, amplitude2 = (np.abs(fft2(m)) for m in (M1, M2))
    return np.linalg.norm(amplitude2 - amplitude1)
def merge(self, hosts):
    """Merge extended host information into services

    :param hosts: hosts list, to look for a specific one
    :type hosts: alignak.objects.host.Hosts
    :return: None
    """
    for extinfo in self:
        matching_host = hosts.find_by_name(extinfo.get_name())
        if matching_host is None:
            continue
        # Fusion: push the extended info into the matching host.
        self.merge_extinfo(matching_host, extinfo)
def generate_parameters(self, parameter_id):
    """Returns a dict of trial (hyper-)parameters, as a serializable object.

    Parameters
    ----------
    parameter_id : int
        Identifier under which the chosen configuration is remembered in
        ``self.total_data``.

    Returns
    -------
    config : dict
        Hyper-parameter configuration for the next trial.

    Raises
    ------
    RuntimeError
        If the population is empty.
    """
    if not self.population:
        raise RuntimeError('The population is empty')
    # First, look for an individual whose result is still pending; if one
    # exists, hand its configuration out again.
    pos = -1
    for i in range(len(self.population)):
        if self.population[i].result is None:
            pos = i
            break
    if pos != -1:
        indiv = copy.deepcopy(self.population[pos])
        self.population.pop(pos)
        total_config = indiv.config
    else:
        # Evolution step: compare two random individuals and keep the one
        # with the larger result (NOTE(review): assumes larger .result is
        # fitter — confirm against the evaluator).
        random.shuffle(self.population)
        if self.population[0].result < self.population[1].result:
            self.population[0] = self.population[1]
        # mutation: re-randomize exactly one randomly chosen dimension of
        # the search space, keeping the rest of the winner's config.
        space = json2space(self.searchspace_json, self.population[0].config)
        is_rand = dict()
        mutation_pos = space[random.randint(0, len(space) - 1)]
        for i in range(len(self.space)):
            is_rand[self.space[i]] = (self.space[i] == mutation_pos)
        config = json2paramater(self.searchspace_json, is_rand, self.random_state, self.population[0].config)
        # Drop the loser of the comparison from the population.
        self.population.pop(1)
        # remove "_index" from config and save params-id
        total_config = config
    self.total_data[parameter_id] = total_config
    config = _split_index(total_config)
    return config
def forward_complex(self, log_sigma):
    """Compute a model response, i.e. complex impedances.

    Parameters
    ----------
    log_sigma : 1xN or 2xN numpy.ndarray
        Model parameters log sigma, N the number of cells. If first
        dimension is of length one, assume phase values to be zero.

    Returns
    -------
    measurements : Nx2 numpy nd array
        Return log_e sigma values of computed forward response.
    """
    # Resistivity model: rho = 1 / sigma = 1 / exp(log_sigma).
    m = 1.0 / np.exp(log_sigma)
    tdm = self._get_tdm(m)
    measurements = tdm.measurements()
    # Convert magnitudes (R) back to log_e sigma.
    # (Removed leftover commented-out IPython debugging code.)
    measurements[:, 0] = np.log(1.0 / measurements[:, 0])
    return measurements
def put_policy(Bucket, Policy, region=None, key=None, keyid=None, profile=None):
    '''Given a valid config, update the policy for a bucket.

    Returns {updated: true} if policy was updated and returns
    {updated: False} if policy was not updated.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_s3_bucket.put_policy my_bucket {...}
    '''
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        # Normalize the policy into a JSON string for the boto3 call.
        if Policy is None:
            policy_doc = '{}'
        elif isinstance(Policy, six.string_types):
            policy_doc = Policy
        else:
            policy_doc = salt.utils.json.dumps(Policy)
        conn.put_bucket_policy(Bucket=Bucket, Policy=policy_doc)
        return {'updated': True, 'name': Bucket}
    except ClientError as exc:
        return {'updated': False, 'error': __utils__['boto3.get_error'](exc)}
def todjango(table, model, update=True, create=True, use_bulk_create=True, *args, **kwargs):
    '''Given a table with appropriate headings create Django models.

    :param table: petl-style table whose first row holds the field names.
    :param model: Django model class to create/update instances of.
    :param update: if True, update existing rows matched by primary key.
    :param create: presumably gates creation of new rows, but is currently
        unused — NOTE(review): confirm intended behavior.
    :param use_bulk_create: if True, collect new instances and bulk-create
        them in chunks instead of saving one at a time.
    '''
    assert issubclass(model, Model), 'Must be supplied a valid Django model class'
    table_iterator = iter(table)
    # BUGFIX: the Python-2-only iterator method .next() breaks on Python 3;
    # the builtin next() works on both (Python 2.6+).
    table_headers = next(table_iterator)
    model_pk_field_name = model._meta.pk.name
    model_field_names = _get_model_field_names(model)
    model_name = model.__name__
    existing_models = _get_django_objects(model)
    existing_model_map = dict([(m.pk, m) for m in existing_models])
    if update:
        # if we are going to update existing models we need to have a table
        # field that corresponds to the model 'pk' field.
        assert model_pk_field_name in set(model_field_names), 'To be able to update existing models the data must have a field corresponding to the Django Primary Key field {}'.format(model_pk_field_name)
    updated_model_count = 0
    unsaved_models = []
    for row in table_iterator:
        value_map = dict(zip(table_headers, row))
        pk = value_map.get(model_pk_field_name, None)
        try:
            django_object = existing_model_map[pk]
            if _will_model_change(value_map, django_object):
                _apply_value_map(value_map, django_object)
                try:
                    django_object.save()
                except Exception as e:
                    # Attach the offending row's data to the exception, reraise.
                    e.petl_data = value_map
                    raise e
                updated_model_count += 1
        except KeyError:
            # No existing model with this pk: create a new instance.
            django_object = model(**value_map)
            if use_bulk_create:
                unsaved_models.append(django_object)
            else:
                try:
                    django_object.save()
                except Exception as e:
                    e.petl_data = value_map
                    raise e
    logger.debug('Bulk creating unsaved {}'.format(model_name))
    if use_bulk_create:
        _chunked_bulk_create(model, unsaved_models)
    msg = 'Updated {} existing {}'.format(updated_model_count, model_name)
    logger.info(msg)
    msg = 'Created {} new {}'.format(len(unsaved_models), model_name)
    logger.info(msg)
def checkArgs(args):
    """Checks the arguments and options.

    :param args: an object containing the options of the program.
    :type args: argparse.Namespace
    :returns: ``True`` if everything was OK.

    If there is a problem with an option, an exception is raised using the
    :py:class:`ProgramError` class, a message is printed to the
    :class:`sys.stderr` and the program exists with code 1.
    """
    # The input file must exist.
    if not os.path.isfile(args.file):
        raise ProgramError("%s: no such file" % args.file)

    # A population file is mandatory and must exist.
    if args.population_file is None:
        raise ProgramError("population-file: no population file")
    if not os.path.isfile(args.population_file):
        raise ProgramError("%s: no such file" % args.population_file)

    return True
def _get_acquisition(self, model, space):
    """Build the acquisition object described by ``self.config``."""
    from copy import deepcopy
    from ..optimization import AcquisitionOptimizer
    from ..acquisitions import select_acquisition

    # Copy so popping 'name' does not mutate the stored configuration.
    optimizer_config = deepcopy(self.config['acquisition']['optimizer'])
    optimizer_name = optimizer_config.pop('name')
    acq_optimizer = AcquisitionOptimizer(space, optimizer_name, **optimizer_config)

    acquisition_cls = select_acquisition(self.config['acquisition']['type'])
    return acquisition_cls.fromConfig(model, space, acq_optimizer, None, self.config['acquisition'])
def get_object(self, queryset=None):
    """Return the object the view is displaying.

    Same as rest_framework.generics.GenericAPIView, but:
    - Failed assertions instead of deprecations
    """
    # Determine the base queryset to use.
    assert queryset is None, "Passing a queryset is disabled"
    filtered_queryset = self.filter_queryset(self.get_queryset())

    # Perform the lookup filtering.
    lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
    lookup_value = self.kwargs.get(lookup_url_kwarg)
    assert lookup_value is not None, "Other lookup methods are disabled"

    obj = self.get_object_or_404(filtered_queryset, **{self.lookup_field: lookup_value})

    # May raise a permission denied
    self.check_object_permissions(self.request, obj)
    return obj
def read_config():
    """Reads and preprocesses the pydoc-markdown configuration file.

    :returns: the configuration dict with defaults applied.
    """
    with open(PYDOCMD_CONFIG) as fp:
        # safe_load avoids arbitrary Python object construction from YAML;
        # yaml.load() without an explicit Loader is deprecated and unsafe.
        config = yaml.safe_load(fp)
    return default_config(config)
def spec_var(model, ph):
    """Compute variance of ``p`` from Fourier coefficients ``ph``.

    Parameters
    ----------
    model : pyqg.Model instance
        The model object from which `ph` originates
    ph : complex array
        The field on which to compute the variance

    Returns
    -------
    var_dens : float
        The variance of `ph`
    """
    spectral_density = 2. * np.abs(ph) ** 2 / model.M ** 2
    # Only half of coefs [0] and [nx/2+1] due to symmetry in real fft2.
    for edge in (0, -1):
        spectral_density[..., edge] *= 0.5
    return spectral_density.sum(axis=(-1, -2))
def group_modes(modes):
    """Groups consecutive transportation modes with same label, into one

    Args:
        modes (:obj:`list` of :obj:`dict`)
    Returns:
        :obj:`list` of :obj:`dict`
    """
    if not modes:
        return modes

    grouped = []
    current = modes[0]
    for mode in modes[1:]:
        if mode['label'] != current['label']:
            # Close the running group where the label changes.
            current['to'] = mode['from']
            grouped.append(current)
            current = mode
    # The final group extends to the end of the last mode.
    current['to'] = modes[-1]['to']
    grouped.append(current)
    return grouped
def get_component_types_from_topic(topic_id, db_conn=None):
    """Returns the component types of a topic."""
    # Fall back to the request-scoped connection when none is supplied.
    db_conn = db_conn or flask.g.db_conn
    query = sql.select([models.TOPICS]).where(models.TOPICS.c.id == topic_id)
    topic_row = db_conn.execute(query).fetchone()
    return dict(topic_row)['component_types']
def get(cls, reactor, source='graphite', **options):
    """Get Alert Class by source."""
    # Look up the alert class registered for this source and instantiate it.
    return cls.alerts[source](reactor, **options)
def next_moment_operating_on(self,
                             qubits: Iterable[ops.Qid],
                             start_moment_index: int = 0,
                             max_distance: int = None) -> Optional[int]:
    """Finds the index of the next moment that touches the given qubits.

    Args:
        qubits: We're looking for operations affecting any of these qubits.
        start_moment_index: The starting point of the search.
        max_distance: The number of moments (starting from the start index
            and moving forward) to check. Defaults to no limit.

    Returns:
        None if there is no matching moment, otherwise the index of the
        earliest matching moment.

    Raises:
        ValueError: negative max_distance.
    """
    # Clamp the search window to the remainder of the circuit.
    max_circuit_distance = len(self._moments) - start_moment_index
    if max_distance is None:
        max_distance = max_circuit_distance
    elif max_distance < 0:
        raise ValueError('Negative max_distance: {}'.format(max_distance))
    else:
        max_distance = min(max_distance, max_circuit_distance)

    search_window = range(start_moment_index, start_moment_index + max_distance)
    return self._first_moment_operating_on(qubits, search_window)
def _reshape_conv_array(self, array, section_height, image_width):
    '''Reshape a rank 4 array to be rank 2, where each column of block_width is
    a filter, and each row of block height is an input channel.

    For example:
      [[[[11, 21, 31, 41], [51, 61, 71, 81], [91, 101, 111, 121]],
        [[12, 22, 32, 42], [52, 62, 72, 82], [92, 102, 112, 122]],
        [[13, 23, 33, 43], [53, 63, 73, 83], [93, 103, 113, 123]]],
       [[[14, 24, 34, 44], [54, 64, 74, 84], [94, 104, 114, 124]],
        [[15, 25, 35, 45], [55, 65, 75, 85], [95, 105, 115, 125]],
        [[16, 26, 36, 46], [56, 66, 76, 86], [96, 106, 116, 126]]],
       [[[17, 27, 37, 47], [57, 67, 77, 87], [97, 107, 117, 127]],
        [[18, 28, 38, 48], [58, 68, 78, 88], [98, 108, 118, 128]],
        [[19, 29, 39, 49], [59, 69, 79, 89], [99, 109, 119, 129]]]]
    should be reshaped to:
      [[11, 12, 13, 21, 22, 23, 31, 32, 33, 41, 42, 43],
       [14, 15, 16, 24, 25, 26, 34, 35, 36, 44, 45, 46],
       [17, 18, 19, 27, 28, 29, 37, 38, 39, 47, 48, 49],
       [51, 52, 53, 61, 62, 63, 71, 72, 73, 81, 82, 83],
       [54, 55, 56, 64, 65, 66, 74, 75, 76, 84, 85, 86],
       [57, 58, 59, 67, 68, 69, 77, 78, 79, 87, 88, 89],
       [91, 92, 93, 101, 102, 103, 111, 112, 113, 121, 122, 123],
       [94, 95, 96, 104, 105, 106, 114, 115, 116, 124, 125, 126],
       [97, 98, 99, 107, 108, 109, 117, 118, 119, 127, 128, 129]]

    array: rank-4 numpy array of weights (assumed
        height x width x in_channels x out_channels — TODO confirm).
    section_height / image_width: display dimensions used to cap how many
        elements are emitted before truncating.
    '''
    # E.g. [100, 24, 24, 10]: this shouldn't be reshaped like normal.
    # Presumably a channels-first layout — roll axis 2 to the front twice to
    # normalize it; NOTE(review): confirm against callers.
    if array.shape[1] == array.shape[2] and array.shape[0] != array.shape[1]:
        array = np.rollaxis(np.rollaxis(array, 2), 2)
    block_height, block_width, in_channels = array.shape[:3]
    rows = []
    # Cap on emitted elements so huge layers don't blow up the visualization.
    max_element_count = section_height * int(image_width / MIN_SQUARE_SIZE)
    element_count = 0
    for i in range(in_channels):
        # Fortran order interleaves filters column-wise, per the docstring.
        rows.append(array[:, :, i, :].reshape(block_height, -1, order='F'))
        # This line should be left in this position. Gives it one extra row.
        if element_count >= max_element_count and not self.config['show_all']:
            break
        element_count += block_height * in_channels * block_width
    return np.vstack(rows)
def get_external_instances(self, phase=None):
    """Get a list of external instances (in a specific phase)

    If phase is None, return all external instances whatever the phase

    :param phase: phase to filter (never used)
    :type phase:
    :return: external instances list
    :rtype: list
    """
    externals = [instance for instance in self.instances if instance.is_external]
    if phase is None:
        return externals
    # Keep only instances active in the requested phase and not pending restart.
    return [instance for instance in externals
            if phase in instance.phases and instance not in self.to_restart]
def calc_qref_v1(self):
    """Determine the reference discharge within the given space-time interval.

    Required state sequences:
      |QZ|
      |QA|

    Calculated flux sequence:
      |QRef|

    Basic equation:
      :math:`QRef = \\frac{QZ_{new}+QZ_{old}+QA_{old}}{3}`

    Example:
        >>> from hydpy.models.lstream import *
        >>> parameterstep()
        >>> states.qz.new = 3.0
        >>> states.qz.old = 2.0
        >>> states.qa.old = 1.0
        >>> model.calc_qref_v1()
        >>> fluxes.qref
        qref(2.0)
    """
    states_new = self.sequences.states.fastaccess_new
    states_old = self.sequences.states.fastaccess_old
    fluxes = self.sequences.fluxes.fastaccess
    # Average of the new inflow, old inflow and old outflow.
    fluxes.qref = (states_new.qz + states_old.qz + states_old.qa) / 3.
def unpack(self, gpsd_socket_response):
    """Sets new socket data as DataStream attributes in those initialised dictionaries

    Arguments:
        gpsd_socket_response (json object):

    Provides:
        self attribute dictionaries, e.g., self.TPV['lat'], self.SKY['gdop']

    Raises:
        AttributeError: 'str' object has no attribute 'keys' when the device
            falls out of the system
        ValueError, KeyError: most likely extra, or mangled JSON data, should
            not happen, but that applies to a lot of things.
    """
    try:
        payload = json.loads(gpsd_socket_response)
        # The reserved word 'class' names the JSON object type; gpsd data
        # package errors are also 'ERROR'.
        report_name = payload.pop('class', 'ERROR')
        # Unknown classes fall through as the bare string itself.
        report = getattr(self, report_name, report_name)
        for field in list(report.keys()):
            # Restore 'n/a' whenever a field is absent from the response.
            report[field] = payload.get(field, 'n/a')
    except AttributeError:
        # 'str' object has no attribute 'keys'
        sys.stderr.write('There is an unexpected exception in DataStream.unpack')
        return
    except (ValueError, KeyError) as error:
        # Extra data or aberrant data in stream.
        sys.stderr.write(str(error))
        return
def load_all(stream, Loader=None):
    """Parse all YAML documents in a stream
    and produce corresponding Python objects.
    """
    if Loader is None:
        # Warn about the implicit (full) loader choice, as plain load() does.
        load_warning('load_all')
        Loader = FullLoader

    loader = Loader(stream)
    try:
        data_available = loader.check_data()
        while data_available:
            yield loader.get_data()
            data_available = loader.check_data()
    finally:
        # Release parser resources even if the generator is closed early.
        loader.dispose()
def convert_tuple_to_float(input_tuple):
    """Converts a tuple to a float where first element of tuple is before the
    decimal and the second element is after the decimal.

    Parameters:
        input_tuple: A tuple of two integers

    Returns:
        A float number which is a combination of both elements of tuple

    Examples:
        >>> convert_tuple_to_float((4, 56))
        4.56
        >>> convert_tuple_to_float((7, 256))
        7.256
        >>> convert_tuple_to_float((8, 123))
        8.123
    """
    # Render each element as text, glue with a decimal point, then parse.
    return float(".".join(str(part) for part in input_tuple))
def mount(name, device, mkmnt=False, fstype='', opts='defaults', user=None, util='mount'):
    '''Mount a device

    :param name: mount point path
    :param device: device (or image, for guestfs/qemu_nbd) to mount
    :param mkmnt: create the mount point directory if it does not exist
    :param fstype: filesystem type flag passed to the mount utility
    :param opts: mount options, comma-separated string or list
    :param user: user to run the mount command as
    :param util: 'mount' (default), 'guestfs' or 'qemu_nbd'

    CLI Example:

    .. code-block:: bash

        salt '*' mount.mount /mnt/foo /dev/sdz1 True
    '''
    if util != 'mount':
        # This functionality used to live in img.mount_image
        if util == 'guestfs':
            return __salt__['guestfs.mount'](name, root=device)
        elif util == 'qemu_nbd':
            mnt = __salt__['qemu_nbd.init'](name, device)
            if not mnt:
                return False
            first = next(six.iterkeys(mnt))
            # Remember the nbd mapping so it can be torn down later.
            __context__['img.mnt_{0}'.format(first)] = mnt
            return first
        # Unknown util value: nothing was mounted.
        return False

    # Darwin doesn't expect defaults when mounting without other options
    if 'defaults' in opts and __grains__['os'] in ['MacOS', 'Darwin', 'AIX']:
        opts = None

    if isinstance(opts, six.string_types):
        opts = opts.split(',')

    # Optionally create the mount point directory first.
    if not os.path.exists(name) and mkmnt:
        __salt__['file.mkdir'](name, user=user)

    args = ''
    if opts is not None:
        lopts = ','.join(opts)
        args = '-o {0}'.format(lopts)

    if fstype:
        # use of fstype on AIX differs from typical Linux use of -t
        # functionality AIX uses -v vfsname, -t fstype mounts all with
        # fstype in /etc/filesystems
        if 'AIX' in __grains__['os']:
            args += ' -v {0}'.format(fstype)
        elif 'solaris' in __grains__['os'].lower():
            args += ' -F {0}'.format(fstype)
        else:
            args += ' -t {0}'.format(fstype)

    cmd = 'mount {0} {1} {2} '.format(args, device, name)
    out = __salt__['cmd.run_all'](cmd, runas=user, python_shell=False)
    if out['retcode']:
        # Non-zero exit: surface the mount utility's stderr to the caller.
        return out['stderr']
    return True
def autopage(self):
    """Iterate through results from all pages.

    :return: all results
    :rtype: generator
    """
    # Yield the current page, then fetch the next one, until an
    # empty/falsy page signals the end.
    while self.items:
        for item in self.items:
            yield item
        self.items = self.fetch_next()
def get_connection_string(params, hide_password=True):
    """Get a database connection string

    Args:
        params (dict): database configuration, as defined in :mod:`ozelot.config`
        hide_password (bool): if True, the password is hidden in the returned
            string (use this for logging purposes).

    Returns:
        str: connection string
    """
    driver = params['driver']
    user = params.get('user')
    password = params.get('password')
    host = params.get('host')
    port = params.get('port')
    database = params.get('database')

    if database is None:
        raise ValueError("Field 'database' of connection parameters cannot be None.")

    # If the password is not set, try to get it from the keyring.
    if password is None and user is not None:
        # noinspection PyTypeChecker
        password = Client._get_password(params)
        if password is None:
            raise RuntimeError("Password not defined and not available in keyring.")

    parts = [driver + '://']
    # Host/port/user/password are only meaningful when a host is given.
    if host is not None:
        if user is not None:
            parts.append(user)
            # Omit zero-length passwords.
            if password:
                parts.append(':' + ('[password hidden]' if hide_password else password))
            parts.append('@')
        parts.append(host)
        if port is not None:
            parts.append(':' + str(port))
    # noinspection PyTypeChecker
    parts.append('/' + database)
    return ''.join(parts)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.