signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def connect(self, node, properties=None):
    """Connect this relationship container to a node.

    :param node: the node to connect to
    :type node: StructuredNode
    :param properties: relationship properties
    :type properties: dict
    :return: True / rel instance
    :raises AttemptedCardinalityViolation: if a node is already connected
    """
    # Zero-or-one cardinality: refuse a second connection.
    if len(self):
        raise AttemptedCardinalityViolation(
            "Node already has {0} can't connect more".format(self))
    return super(ZeroOrOne, self).connect(node, properties)
|
def all_functions_called(self):
    '''Return the list of functions reachable from the contract,
    including inherited ones (list(Function)).'''
    # For each function/modifier, take its internal calls, and also include
    # the functions/modifiers themselves as one extra sublist.
    all_calls = [f.all_internal_calls() for f in self.functions + self.modifiers] + [self.functions + self.modifiers]
    # Flatten the list of lists; self.functions is added again here
    # (duplicates are removed by the set() below).
    all_calls = [item for sublist in all_calls for item in sublist] + self.functions
    all_calls = list(set(all_calls))
    # Constructors of inherited contracts are reachable as well.
    all_constructors = [c.constructor for c in self.inheritance]
    all_constructors = list(set([c for c in all_constructors if c]))
    all_calls = set(all_calls + all_constructors)
    # Internal calls may contain non-Function entries (e.g. state-variable
    # accessors) — presumably why this filter exists; keep Function objects only.
    return [c for c in all_calls if isinstance(c, Function)]
|
def from_nds2(cls, nds2channel):
    """Generate a new channel using an existing ``nds2.channel`` object.

    Copies over the name, sample rate, signal units, channel type and
    data type from the nds2 object into a new instance of this class.
    """
    # extract metadata
    name = nds2channel.name
    sample_rate = nds2channel.sample_rate
    # empty unit strings become None
    unit = nds2channel.signal_units or None
    ctype = nds2channel.channel_type_to_string(nds2channel.channel_type)
    # map the nds2 data-type enum onto a numpy dtype
    dtype_map = {  # pylint: disable:no-member
        nds2channel.DATA_TYPE_INT16: numpy.int16,
        nds2channel.DATA_TYPE_INT32: numpy.int32,
        nds2channel.DATA_TYPE_INT64: numpy.int64,
        nds2channel.DATA_TYPE_FLOAT32: numpy.float32,
        nds2channel.DATA_TYPE_FLOAT64: numpy.float64,
        nds2channel.DATA_TYPE_COMPLEX32: numpy.complex64,
    }
    dtype = dtype_map.get(nds2channel.data_type)
    return cls(name, sample_rate=sample_rate, unit=unit, dtype=dtype,
               type=ctype)
|
def compare_digest(a, b):
    """Compare two hash digests, dispatching on the Python major version."""
    # Python 3 and Python 2 need different implementations.
    if sys.version_info[0] >= 3:
        return _compare_digest_py3(a, b)
    return _compare_digest_py2(a, b)
|
def adjoint(self):
    """Adjoint of this operator.

    Returns
    -------
    adjoint : `PointwiseInner`
        Operator built from the same vector field and weighting, acting
        on this operator's range.
    """
    return PointwiseInner(vfspace=self.range,
                          vecfield=self.vecfield,
                          weighting=self.weights)
|
def register_computer_view(request):
    """Register a computer.

    GET renders an empty registration form; POST validates it, saves the
    computer with the requesting user as owner, and redirects on success.
    On validation failure the bound form is re-rendered with an error.
    """
    if request.method == "POST":
        form = ComputerRegistrationForm(request.POST)
        logger.debug(form)
        if form.is_valid():
            computer = form.save()
            # attach the current user as the owner after the initial save
            computer.user = request.user
            computer.save()
            messages.success(request, "Successfully added computer.")
            return redirect("itemreg")
        messages.error(request, "Error adding computer.")
    else:
        form = ComputerRegistrationForm()
    return render(request, "itemreg/register_form.html",
                  {"form": form, "action": "add", "type": "computer",
                   "form_route": "itemreg_computer"})
|
def mapping_create(index, doc_type, body=None, hosts=None, profile=None, source=None):
    '''Create a mapping in a given index.

    index
        Index for the mapping
    doc_type
        Name of the document type
    body
        Mapping definition as specified in
        https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-mapping.html
    source
        URL to file specifying mapping definition. Cannot be used in
        combination with ``body``.

    CLI example::

        salt myminion elasticsearch.mapping_create testindex user '{"user": {"properties": {"message": {"type": "string", "store": true}}}}'
    '''
    es = _get_instance(hosts, profile)
    # body and source are mutually exclusive ways to supply the definition
    if source and body:
        message = 'Either body or source should be specified but not both.'
        raise SaltInvocationError(message)
    if source:
        # fetch the mapping definition from the salt fileserver
        body = __salt__['cp.get_file_str'](source, saltenv=__opts__.get('saltenv', 'base'))
    try:
        result = es.indices.put_mapping(index=index, doc_type=doc_type, body=body)
        return result.get('acknowledged', False)
    except elasticsearch.TransportError as e:
        raise CommandExecutionError("Cannot create mapping {0}, server returned code {1} with message {2}".format(index, e.status_code, e.error))
|
def functions(self):
    """Return an iterator over this module's functions.

    The iterator will yield a ValueRef for each function.
    """
    raw_iter = ffi.lib.LLVMPY_ModuleFunctionsIter(self)
    # keep the module alive while the iterator is in use
    return _FunctionsIterator(raw_iter, dict(module=self))
|
def rank(tensor: BKTensor) -> int:
    """Return the number of dimensions of a tensor."""
    if isinstance(tensor, np.ndarray):
        return len(tensor.shape)
    # Non-numpy backend tensor.  NOTE(review): ``tensor[0].size()`` reads the
    # shape of the first element rather than of the tensor itself — presumably
    # the backend wraps the tensor in a container; confirm against the
    # backend's BKTensor definition.
    return len(tensor[0].size())
|
def spinner(self, spinner=None):
    """Setter for the spinner property.

    Parameters
    ----------
    spinner : dict, str
        Defines the spinner value with frame and interval.
    """
    # Resolve the requested spinner, then restart the animation state.
    resolved = self._get_spinner(spinner)
    self._spinner = resolved
    self._frame_index = 0
    self._text_index = 0
|
def users_feature(app):
    """Add users feature.

    Allows registering users and assigning groups; instantiates flask-login,
    flask-principal and oauth integration.

    :param app: the Flask application to configure
    :raises x.JwtSecretMissing: if USER_JWT_SECRET is not configured
    """
    # check we have jwt secret configured
    if not app.config.get('USER_JWT_SECRET', None):
        raise x.JwtSecretMissing('Please set USER_JWT_SECRET in config')
    # use custom session interface
    app.session_interface = BoilerSessionInterface()
    # init user service
    user_service.init(app)
    # init login manager
    login_manager.init_app(app)
    login_manager.login_view = 'user.login'
    login_manager.login_message = None

    @login_manager.user_loader
    def load_user(id):
        # resolve the session user id to a user object
        return user_service.get(id)

    # init OAuth: register each configured provider exactly once
    oauth.init_app(app)
    registry = OauthProviders(app)
    providers = registry.get_providers()
    with app.app_context():
        for provider in providers:
            if provider not in oauth.remote_apps:
                oauth.remote_app(provider, **providers[provider])
                registry.register_token_getter(provider)

    # init principal
    principal.init_app(app)

    @principal.identity_loader
    def load_identity():
        if current_user.is_authenticated:
            return Identity(current_user.id)
        # anonymous: drop any stale identity data from the session
        session.pop('identity.name', None)
        session.pop('identity.auth_type', None)
        return AnonymousIdentity()

    @identity_loaded.connect_via(app)
    def on_identity_loaded(sender, identity):
        identity.user = current_user
        if not current_user.is_authenticated:
            return
        # grant the user need plus one need per role
        identity.provides.add(UserNeed(current_user.id))
        for role in current_user.roles:
            identity.provides.add(RoleNeed(role.handle))
|
def get_relationships(self):
    """Gets all ``Relationships``.

    return: (osid.relationship.RelationshipList) - a list of
            ``Relationships``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceLookupSession.get_resources
    # NOTE: This implementation currently ignores plenary view
    collection = JSONClientValidated('relationship', collection='Relationship', runtime=self._runtime)
    # Apply the session's view filter; newest documents first.
    result = collection.find(self._view_filter()).sort('_id', DESCENDING)
    return objects.RelationshipList(result, runtime=self._runtime, proxy=self._proxy)
|
def _allocate_output(self, windows, shape):
    """Override the default array allocation to produce a LabelArray when we
    have a string-like dtype.

    :param windows: input data windows; the first one is inspected for
        string-like output
    :param shape: shape of the output array to allocate
    """
    if self.dtype == int64_dtype:
        # Integer classifier: use the default ndarray allocation.
        return super(CustomClassifier, self)._allocate_output(windows, shape,)
    # This is a little bit of a hack.  We might not know what the
    # categories for a LabelArray are until it's actually been loaded, so
    # we need to look at the underlying data.
    return windows[0].data.empty_like(shape)
|
def cross_validate(data=None, folds=5, repeat=1, metrics=None, reporters=None, model_def=None, **kwargs):
    """Shortcut to cross-validate a single configuration.

    ModelDefinition variables are passed in as keyword args, along
    with the cross-validation parameters.
    """
    md_kwargs = {}
    if model_def is None:
        # Pull ModelDefinition parameters out of kwargs and build one.
        for arg in ModelDefinition.params:
            if arg in kwargs:
                md_kwargs[arg] = kwargs.pop(arg)
        model_def = ModelDefinition(**md_kwargs)
    if metrics is None:
        metrics = []
    if reporters is None:
        reporters = []
    # Wrap plain metrics so they share the reporter interface.
    metrics = [MetricReporter(metric) for metric in metrics]
    results = modeling.cross_validate(model_def, data, folds, repeat=repeat, **kwargs)
    for r in reporters + metrics:
        r.process_results(results)
    return CVResult(results, reporters, metrics)
|
def as_hyperbola(self, rotated=False):
    """Return the hyperbolic error area as a conic.

    :param rotated: if True, transform the hyperbola into the frame
        defined by ``self.axes``.
    """
    idx = N.diag_indices(3)
    # Reciprocals of the covariance diagonal.
    _ = 1 / self.covariance_matrix[idx]
    d = list(_)
    # Flip the sign of the last term — this is what turns the quadric
    # into a hyperbola rather than an ellipsoid.
    d[-1] *= -1
    # Homogeneous 4x4 matrix: -1 on the diagonal, then the scaled
    # upper-left 3x3 diagonal entries.
    arr = N.identity(4) * -1
    arr[idx] = d
    hyp = conic(arr)
    if rotated:
        R = augment(self.axes)
        hyp = hyp.transform(R)
    return hyp
|
def derive(self, peerkey, **kwargs):
    """Derive a shared key (DH, ECDH, VKO 34.10).

    Requires a private key to be available.

    @param peerkey - other key (may be public only)

    Keyword parameters are algorithm-specific (e.g. ``ukm`` for GOST).

    :return: the raw shared-key bytes
    :raises ValueError: if no private key is available
    :raises PKeyError: on any libcrypto failure
    """
    if not self.cansign:
        raise ValueError("No private key available")
    ctx = libcrypto.EVP_PKEY_CTX_new(self.key, None)
    if ctx is None:
        raise PKeyError("Initializing derive context")
    # The original freed the context only on the success path, leaking it
    # whenever a PKeyError was raised; free it on every path instead.
    try:
        if libcrypto.EVP_PKEY_derive_init(ctx) < 1:
            raise PKeyError("derive_init")
        # This is workaround around missing functionality in GOST engine
        # it provides only numeric control command to set UKM, not
        # string one.
        self._configure_context(ctx, kwargs, ["ukm"])
        if libcrypto.EVP_PKEY_derive_set_peer(ctx, peerkey.key) <= 0:
            raise PKeyError("Cannot set peer key")
        if "ukm" in kwargs:
            # We just hardcode numeric command to set UKM here
            if libcrypto.EVP_PKEY_CTX_ctrl(ctx, -1, 1 << 10, 8, 8, kwargs["ukm"]) <= 0:
                raise PKeyError("Cannot set UKM")
        # First call with a NULL buffer asks for the required length.
        keylen = c_long(0)
        if libcrypto.EVP_PKEY_derive(ctx, None, byref(keylen)) <= 0:
            raise PKeyError("computing shared key length")
        buf = create_string_buffer(keylen.value)
        if libcrypto.EVP_PKEY_derive(ctx, buf, byref(keylen)) <= 0:
            raise PKeyError("computing actual shared key")
        return buf.raw[:int(keylen.value)]
    finally:
        libcrypto.EVP_PKEY_CTX_free(ctx)
|
def __pathToTuple(self, path):
    """Convert a directory or file path to its tuple identifier.

    Parameters
    ----------
    path : str
        Path to convert. It can look like /, /directory, /directory/ or
        /directory/filename.

    Returns
    -------
    tup_id : tuple
        Two-element tuple identifier of directory/file of
        (`directory`, `filename`) format. If path leads to the main
        directory, then both fields of the tuple will be ``None``. If path
        leads to a directory, then the `filename` field will be ``None``.

    Raises
    ------
    YTFS.PathConvertError
        When an invalid path is given.
    """
    if not path or path.count('/') > 2:
        # empty or too deep path
        raise YTFS.PathConvertError("Bad path given")
    try:
        split = path.split('/')
    except (AttributeError, TypeError):
        # path is not a string
        raise TypeError("Path has to be string")
    if split[0]:
        # path doesn't start with '/'
        raise YTFS.PathConvertError("Path needs to start with '/'")
    del split[0]
    try:
        if not split[-1]:
            # given path ended with '/'
            split.pop()
    except IndexError:
        # at least one element in split should exist at the moment
        raise YTFS.PathConvertError("Bad path given")
    if len(split) > 2:
        # shouldn't happen due to the first check, but...
        raise YTFS.PathConvertError("Path is too deep. Max allowed level is 2")
    try:
        d = split[0]
    except IndexError:
        d = None
    try:
        f = split[1]
    except IndexError:
        f = None
    if not d and f:
        # filename is present, but directory is not
        raise YTFS.PathConvertError("Bad path given")
    return (d, f)
|
def locate(self, image):
    """Gets the URL of the given ``image``.

    :param image: the image to get its url
    :type image: :class:`sqlalchemy_imageattach.entity.Image`
    :returns: the url of the image
    :rtype: :class:`str`
    """
    from .entity import Image
    if not isinstance(image, Image):
        raise TypeError('image must be a sqlalchemy_imageattach.entity.'
                        'Image instance, not ' + repr(image))
    url = self.get_url(image.object_type, image.object_id,
                       image.width, image.height, image.mimetype)
    # Append the creation timestamp as a cache-busting query parameter,
    # picking the separator based on whether a query string already exists.
    if '?' in url:
        fmt = '{0}&_ts={1}'
    else:
        fmt = '{0}?_ts={1}'
    return fmt.format(url, image.created_at.strftime('%Y%m%d%H%M%S%f'))
|
def detectNintendo(self):
    """Return detection of Nintendo.

    Detects if the current device is a Nintendo game device.
    """
    # The original or-ed the exact same membership test three times; one
    # check is equivalent.  NOTE(review): the duplicates may have been meant
    # to test other identifiers (e.g. DS/Wii strings) — confirm against the
    # upstream MobileESP sources.
    return UAgentInfo.deviceNintendo in self.__userAgent
|
def get_query_batch_request(self, batch_id, job_id=None):
    """Fetch the request sent for the batch.

    Note: should only be used for query batches.
    """
    # Resolve the owning job when the caller did not supply it.
    job_id = job_id or self.lookup_job_id(batch_id)
    url = self.endpoint + "/job/{}/batch/{}/request".format(job_id, batch_id)
    response = requests.get(url, headers=self.headers())
    self.check_status(response)
    return response.text
|
def _add_replace_pair ( self , name , value , quote ) :
"""Adds a replace part to the map of replace pairs .
: param name : The name of the replace pair .
: param value : The value of value of the replace pair ."""
|
key = '@' + name + '@'
key = key . lower ( )
class_name = value . __class__ . __name__
if class_name in [ 'int' , 'float' ] :
value = str ( value )
elif class_name in [ 'bool' ] :
value = '1' if value else '0'
elif class_name in [ 'str' ] :
if quote :
value = "'" + value + "'"
else :
self . _io . log_verbose ( "Ignoring constant {} which is an instance of {}" . format ( name , class_name ) )
self . _replace_pairs [ key ] = value
|
def _get_backtrace(self, frames, inspect_packages=False, depth=0):
    '''Get a nicely formatted backtrace.

    since -- 7-6-12

    frames -- list -- the frame_tuple frames to format
    inspect_packages -- boolean -- by default, this only prints code of
        packages that are not in the pythonN directories, which cuts out a
        lot of the noise; set this to True if you want a full stacktrace
    depth -- integer -- how deep you want the stack trace to print (ie, if
        you only care about the last three calls, pass in depth=3 so you
        only get the last 3 rows of the stack)

    return -- list -- each line will be a nicely formatted entry of the
        backtrace
    '''
    calls = []
    # 1-based index so summaries are numbered from 1
    for count, f in enumerate(frames, 1):
        call = self.call_class(f)
        s = self._get_call_summary(call, inspect_packages=inspect_packages, index=count)
        calls.append(s)
        # stop once the requested number of rows has been collected
        if depth and (count > depth):
            break
    # reverse the order on return so most recent is on the bottom
    return calls[::-1]
|
def as_uni_errors(form):
    """Renders only form errors like django-uni-form::

        {% load uni_form_tags %}
        {{ form|as_uni_errors }}
    """
    # Formsets and plain forms use different templates and context keys.
    if isinstance(form, BaseFormSet):
        template_name, context_key = 'uni_form/errors_formset.html', 'formset'
    else:
        template_name, context_key = 'uni_form/errors.html', 'form'
    template = get_template(template_name)
    return template.render(Context({context_key: form}))
|
def resp_set_location(self, resp, location=None):
    """Default callback for get_location/set_location.

    An explicit *location* wins; otherwise the label is read from the
    response, stripping NUL padding.  Does nothing if both are falsy.
    """
    if location:
        self.location = location
        return
    if resp:
        self.location = resp.label.decode().replace("\x00", "")
|
def _assemble_and_send_request(self):
    """Fires off the Fedex request.

    @warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_request(), WHICH
        RESIDES ON FedexBaseService AND IS INHERITED.
    """
    client = self.client
    # We get an exception like this when specifying an IntegratorId:
    #   suds.TypeNotFound: Type not found: 'IntegratorId'
    # Setting it to None does not seem to appease it.
    del self.ClientDetail.IntegratorId
    # Fire off the query.
    response = client.service.postalCodeInquiry(
        WebAuthenticationDetail=self.WebAuthenticationDetail,
        ClientDetail=self.ClientDetail,
        TransactionDetail=self.TransactionDetail,
        Version=self.VersionId,
        PostalCode=self.PostalCode,
        CountryCode=self.CountryCode,
        CarrierCode=self.CarrierCode)
    return response
|
def data_received(self, data):
    '''Override of asyncio.Protocol.

    Buffers incoming bytes and extracts complete DataPackage frames;
    incomplete frames stay buffered until more data arrives.
    '''
    self._buffered_data.extend(data)
    while self._buffered_data:
        size = len(self._buffered_data)
        if self._data_package is None:
            # Not even a full header buffered yet: wait for more data.
            if size < DataPackage.struct_datapackage.size:
                return None
            self._data_package = DataPackage(self._buffered_data)
        # Header parsed but payload incomplete: wait for more data.
        if size < self._data_package.length:
            return None
        try:
            self._data_package.extract_data_from(self._buffered_data)
        except KeyError as e:
            logging.error('Unsupported package received: {}'.format(e))
        except Exception as e:
            logging.exception(e)
            # empty the byte-array to recover from this error
            self._buffered_data.clear()
        else:
            self._on_package_received()
        # Reset so the next loop iteration parses the next frame.
        # NOTE(review): flattened source made the indentation ambiguous —
        # this reset is placed at loop level (inside the else it would loop
        # forever on a KeyError frame); confirm against upstream.
        self._data_package = None
|
def procs():
    '''Return the process data.

    .. versionchanged:: 2016.11.4
        Added support for AIX

    CLI Example:

    .. code-block:: bash

        salt '*' status.procs
    '''
    # Get the user, pid and cmd
    ret = {}
    uind = 0
    pind = 0
    cind = 0
    plines = __salt__['cmd.run'](__grains__['ps'], python_shell=True).splitlines()
    # First line is the ps header: locate the columns of interest.
    guide = plines.pop(0).split()
    if 'USER' in guide:
        uind = guide.index('USER')
    elif 'UID' in guide:
        uind = guide.index('UID')
    if 'PID' in guide:
        pind = guide.index('PID')
    if 'COMMAND' in guide:
        cind = guide.index('COMMAND')
    elif 'CMD' in guide:
        cind = guide.index('CMD')
    for line in plines:
        if not line:
            continue
        comps = line.split()
        # Everything from the command column onward is the command line.
        ret[comps[pind]] = {'user': comps[uind], 'cmd': ' '.join(comps[cind:])}
    return ret
|
def _inflate(cls, data):
    """Update config by deserialising the input dictionary."""
    # Copy every stored key/value onto the class, then return the
    # serialised view of the result.
    config = data[Constants.CONFIG_KEY]
    for key in config:
        setattr(cls, key, config[key])
    return cls._deflate()
|
def check_is_declared_explicit(lineno, id_, classname='variable'):
    """Check if the current ID is already declared.

    If not, triggers an "undeclared identifier" error, if the --explicit
    command line flag is enabled (or #pragma option strict is in use).

    If not in strict mode, passes it silently.
    """
    if not config.OPTIONS.explicit.value:
        # Not in explicit/strict mode: accept undeclared identifiers.
        return True
    # The symbol table emits the error itself; we only report success.
    entry = global_.SYMBOL_TABLE.check_is_declared(id_, lineno, classname)
    return entry is not None
|
def get_osid_containable_mdata():
    """Return the default mdata map for OsidContainable."""
    # Single boolean 'sequestered' element with the standard default metadata.
    sequestered = {
        'element_label': {
            'text': 'sequestered',
            'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
            'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
            'formatTypeId': str(DEFAULT_FORMAT_TYPE),
        },
        'instructions': {
            'text': 'enter either true or false.',
            'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
            'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
            'formatTypeId': str(DEFAULT_FORMAT_TYPE),
        },
        'required': False,
        'read_only': False,
        'linked': False,
        'default_boolean_values': [False],
        'array': False,
        'syntax': 'BOOLEAN',
    }
    return {'sequestered': sequestered}
|
def partial_transform(self, traj):
    """Featurize an MD trajectory into a vector space derived from
    residue-residue distances.

    Parameters
    ----------
    traj : mdtraj.Trajectory
        A molecular dynamics trajectory to featurize.

    Returns
    -------
    features : np.ndarray, dtype=float, shape=(n_samples, n_features)
        A featurized trajectory is a 2D array of shape
        `(length_of_trajectory x n_features)` where each `features[i]`
        vector is computed by applying the featurization function
        to the `i`th snapshot of the input trajectory.

    See Also
    --------
    transform : simultaneously featurize a collection of MD trajectories
    """
    # Check that the topology is consistent with the reference frame.
    # Fixed from the original: the bare ``except:`` is narrowed so that
    # KeyboardInterrupt/SystemExit are not swallowed, and the warning text
    # had missing spaces between its concatenated fragments.
    try:
        assert traj.top == self.reference_frame.top
    except Exception:
        warnings.warn("The topology of the trajectory is not "
                      "the same as that of the reference frame, "
                      "which might give meaningless results.")
    distances, _ = md.compute_contacts(traj, self.contacts, self.scheme,
                                       ignore_nonprotein=False,
                                       periodic=self.periodic)
    return self._transform(distances)
|
def _build_meta(text: str, title: str) -> DocstringMeta:
    """Build a docstring element.

    :param text: docstring element text
    :param title: title of the section containing the element
    :return: parsed DocstringMeta
    """
    meta = _sections[title]
    # A "returns" section whose first word has no ':' carries no spec,
    # only a description.
    if meta == "returns" and ":" not in text.split()[0]:
        return DocstringMeta([meta], description=text)
    # Split spec and description
    before, desc = text.split(":", 1)
    if desc:
        # Drop a single leading space after the colon.
        desc = desc[1:] if desc[0] == " " else desc
        if "\n" in desc:
            # Keep the first line as-is; dedent the continuation lines.
            first_line, rest = desc.split("\n", 1)
            desc = first_line + "\n" + inspect.cleandoc(rest)
        desc = desc.strip("\n")
    # Build meta args: a "name (type)" spec is split into type and name
    # for params; everything else keeps the raw spec.
    m = re.match(r"(\S+) \((\S+)\)$", before)
    if meta == "param" and m:
        arg_name, type_name = m.group(1, 2)
        args = [meta, type_name, arg_name]
    else:
        args = [meta, before]
    return DocstringMeta(args, description=desc)
|
def remover_provisionamento(self, equipamentos, vips):
    """Remove the provisioning of a virtual group in the VM orchestration system.

    :param equipamentos: List of equipment generated by the
        "add_equipamento_remove" method of the "EspecificacaoGrupoVirtual"
        class.
    :param vips: List of VIPs generated by the "add_vip_remove" method of
        the "EspecificacaoGrupoVirtual" class.
    :return: None
    :raise InvalidParameterError: A mandatory field was missing from the
        lists or had an invalid value.
    :raise IpNaoExisteError: IP not registered.
    :raise EquipamentoNaoExisteError: Equipment not registered.
    :raise IpError: IP is not associated with the equipment.
    :raise DataBaseError: networkapi failed to access the database.
    :raise XMLError: networkapi failed to read the request XML or to
        generate the response XML.
    """
    code, map = self.submit({'equipamentos': {'equipamento': equipamentos}, 'vips': {'vip': vips}}, 'DELETE', 'grupovirtual/')
    return self.response(code, map)
|
def render_docstring(self):
    """Render a one-line, human-readable description of this field for an
    IPython docstring."""
    default = (' = ' + str(self.default)) if self.default else ''
    opt = 'optional' if self.is_optional else ''
    can_be = ' '.join(self.possible_values) if self.possible_values else ''
    can_be = 'one of [{}]'.format(can_be) if can_be else ''
    type_ = 'of type "' + str(self.type) + '"'
    res = ' '.join([opt, '"' + self.field + '"', default, type_, can_be, '\n'])
    # NOTE(review): tokenized source obscured the replace() literals;
    # presumably this collapses the double spaces produced by empty join
    # parts into single spaces — confirm against upstream.
    return res.replace('  ', ' ').lstrip()
|
def gene_name(st, exclude=("ev",), sep="."):
    """Strip the alternative-splicing suffix from a gene name (BLAST filtering).

    This is ugly, but different annotation groups are inconsistent with
    respect to how the alternative splicings are named.  Mostly it can be
    done by removing the suffix, except for names starting with one of the
    *exclude* prefixes.
    """
    # Excluded prefixes keep their full name: disable suffix splitting.
    for prefix in exclude:
        if st.startswith(prefix):
            sep = None
            break
    st = st.split('|')[0]
    if sep and sep in st:
        name, suffix = st.rsplit(sep, 1)
    else:
        name, suffix = st, ""
    # Only a single-character suffix is treated as an isoform marker;
    # a longer suffix is assumed to be part of the gene name itself.
    return name if len(suffix) == 1 else st
|
def saveNetworkToFile(self, filename, makeWrapper=1, mode="pickle", counter=None):
    """Deprecated.

    Saves the network to *filename*, either as a pickle (optionally with a
    wrapper .py loader script) or in the plain "conx" text format.
    A run of '?' characters in *filename* is replaced with a zero-padded
    epoch/counter number.
    """
    if "?" in filename:
        # replace ? pattern in filename with epoch number
        import re
        char = "?"
        match = re.search(re.escape(char) + "+", filename)
        if match:
            num = self.epoch
            if counter != None:
                num = counter
            elif self.totalEpoch != 0:
                # use a total epoch, if one:
                num = self.totalEpoch
            # zero-pad to the width of the '?' run
            fstring = "%%0%dd" % len(match.group())
            filename = filename[:match.start()] + fstring % num + filename[match.end():]
    self.lastAutoSaveNetworkFilename = filename
    if mode == "pickle":
        # dump network via pickle:
        import pickle
        basename = filename.split('.')[0]
        filename += ".pickle"
        fp = open(filename, 'w')
        pickle.dump(self, fp)
        fp.close()
        # make wrapper python file:
        if makeWrapper:
            fp = open(basename + ".py", "w")
            fp.write("from pyrobot.brain.conx import *\n")
            fp.write("import pickle\n")
            fp.write("fp = open('%s', 'r')\n" % filename)
            fp.write("network = pickle.load(fp)")
            fp.close()
        # give some help:
        print("To load network:")
        print(" %% python -i %s " % (basename + ".py"))
        print(" >>> network.train() # for example")
        print("--- OR ---")
        print(" % python")
        print(" >>> from pyrobot.brain.conx import *")
        print(" >>> network = loadNetwork(%s)" % filename)
        print(" >>> network.train() # for example")
    elif mode in ["plain", "conx"]:
        fp = open(filename, "w")
        fp.write("network, %s\n" % (self.__class__.__name__))
        for layer in self.layers:
            fp.write("layer, %s, %s\n" % (layer.name, layer.size))
            # biases:
            for i in range(layer.size):
                fp.write("%f " % layer.weight[i])
            fp.write("\n")
        for connection in self.connections:
            fp.write("connection, %s, %s\n" % (connection.fromLayer.name, connection.toLayer.name))
            # weights:
            for i in range(connection.fromLayer.size):
                for j in range(connection.toLayer.size):
                    fp.write("%f " % connection.weight[i][j])
                fp.write("\n")
        fp.close()
|
def _delete_node ( self , tree , node ) :
"""Private function that eliminate node from tree .
Parameters
tree : object
node : int
node to be eliminated from tree
Returns
pruned _ tree : object"""
|
# Calculate gains
temp_tree = copy . deepcopy ( tree )
def recourse ( temp_tree_ , del_node ) :
if isinstance ( temp_tree_ , dict ) :
if temp_tree_ [ 'split' ] != - 1 :
if temp_tree_ [ 'node' ] == del_node :
del temp_tree_ [ 'sr' ]
del temp_tree_ [ 'sl' ]
del temp_tree_ [ 'node' ]
temp_tree_ [ 'split' ] = - 1
else :
for k in [ 'sl' , 'sr' ] :
recourse ( temp_tree_ [ k ] , del_node )
return None
recourse ( temp_tree , node )
return temp_tree
|
def log(x, base=None):
    """Calculate the logarithm.

    Parameters
    ----------
    x : float or array_like
        Input values
    base : int or float (Default: None)
        Base of the log. If `None`, the natural logarithm
        is computed (`base=np.e`).

    Returns
    -------
    out : float or ndarray
        Calculated result
    """
    if base == 10:
        return np.log10(x)
    if base == 2:
        return np.log2(x)
    if base is None or base == np.e:
        return np.log(x)
    # Arbitrary base via the change-of-base formula.
    return np.log(x) / np.log(base)
|
def log_request_fail(self, method, full_url, body, duration, status_code=None, exception=None):
    """Log an unsuccessful API call.

    :param method: HTTP method used
    :param full_url: full request URL
    :param body: request body (bytes or dict); logged at debug level
    :param duration: request duration in seconds
    :param status_code: HTTP status, or None if the request never completed
    :param exception: when truthy, attach exc_info to the warning
    """
    logger.warning('%s %s [status:%s request:%.3fs]', method, full_url,
                   status_code or 'N/A', duration,
                   exc_info=exception is not None)
    if body and not isinstance(body, dict):
        # raw payloads arrive as bytes; decode before logging
        body = body.decode('utf-8')
    logger.debug('> %s', body)
|
def tag_users(self, tag_id, open_id_list):
    """Batch-tag users with a tag.

    :param tag_id: tag ID
    :param open_id_list: list containing the OPENIDs of one or more users
    :return: the returned JSON data packet
    """
    return self.post(url="https://api.weixin.qq.com/cgi-bin/tags/members/batchtagging", data={"openid_list": open_id_list, "tagid": tag_id})
|
def get_buckets(self, bucket_type=None, timeout=None):
    """Fetch a list of all buckets, optionally within a bucket type."""
    resolved_type = self._get_bucket_type(bucket_type)
    url = self.bucket_list_path(bucket_type=resolved_type, timeout=timeout)
    status, headers, body = self._request('GET', url)
    if status != 200:
        raise RiakError('Error getting buckets.')
    # Response body is JSON with a 'buckets' key.
    return json.loads(bytes_to_str(body))['buckets']
|
def get_start_and_end_time(self, ref=None):
    """Specific function to get start time and end time for WeekDayDaterange.

    :param ref: time in seconds
    :type ref: int
    :return: tuple with start and end time
    :rtype: tuple(int, int)
    """
    now = time.localtime(ref)
    # If no year, it's our year
    if self.syear == 0:
        self.syear = now.tm_year
    month_start_id = now.tm_mon
    day_start = find_day_by_weekday_offset(self.syear, month_start_id, self.swday, self.swday_offset)
    start_time = get_start_of_day(self.syear, month_start_id, day_start)
    # Same for end year
    if self.eyear == 0:
        self.eyear = now.tm_year
    month_end_id = now.tm_mon
    day_end = find_day_by_weekday_offset(self.eyear, month_end_id, self.ewday, self.ewday_offset)
    end_time = get_end_of_day(self.eyear, month_end_id, day_end)
    # Maybe end_time is before start. So look for the next month
    if start_time > end_time:
        month_end_id += 1
        if month_end_id > 12:
            # roll the end over into January of the next year
            month_end_id = 1
            self.eyear += 1
        day_end = find_day_by_weekday_offset(self.eyear, month_end_id, self.ewday, self.ewday_offset)
        end_time = get_end_of_day(self.eyear, month_end_id, day_end)
    now_epoch = time.mktime(now)
    # But maybe we do not look far enough; we should add a month
    if end_time < now_epoch:
        month_end_id += 1
        month_start_id += 1
        if month_end_id > 12:
            month_end_id = 1
            self.eyear += 1
        if month_start_id > 12:
            month_start_id = 1
            self.syear += 1
        # First start
        day_start = find_day_by_weekday_offset(self.syear, month_start_id, self.swday, self.swday_offset)
        start_time = get_start_of_day(self.syear, month_start_id, day_start)
        # Then end
        day_end = find_day_by_weekday_offset(self.eyear, month_end_id, self.ewday, self.ewday_offset)
        end_time = get_end_of_day(self.eyear, month_end_id, day_end)
    return (start_time, end_time)
|
def _str_superbox(self):
    """Compose the __str__ text for a superbox: its own header followed by
    its child boxes."""
    parts = [Jp2kBox.__str__(self)]
    for child in self.box:
        # Indent the child boxes to make the association clear.
        parts.append(self._indent(str(child)))
    return '\n'.join(parts)
|
def pdf_row_limiter(rows, limits=None, **kwargs):
    """Slice *rows* to the half-open range described by *limits*.

    No best-effort algorithm is attempted here because the possibilities
    are infinite with a text data structure extracted from a PDF.

    :param rows: sequence of rows extracted from a PDF.
    :param limits: optional [start, stop] pair; a missing or falsy entry
        leaves that side of the slice open.
    :return: rows[start:stop]
    """
    # The original re-checked ``if limits`` after ``limits or [...]``,
    # which is always true at that point; the redundant test is removed.
    limits = limits or [None, None]
    start = limits[0]
    stop = limits[1] if len(limits) > 1 else None
    return rows[start:stop]
|
def resume(self):
    """Resume progress updates."""
    cond = self.condition
    with cond:
        # Clear the pause flag and wake every thread waiting on it.
        self.paused = False
        cond.notify_all()
|
def valid(cls, data, context=None):
    """Shortcut to create a VALID Token."""
    # Delegate to the constructor with the VALID tag type pre-filled.
    return cls(cls.TagType.VALID, data, context)
|
def get_plural(amount, variants, absence=None):
    """Get the proper plural form together with the value.

    @param amount: amount of objects
    @type amount: C{integer types}
    @param variants: variants (forms) of object in such form:
        (1 object, 2 objects, 5 objects).
    @type variants: 3-element C{sequence} of C{unicode}
        or C{unicode} (three variants with delimiter ',')
    @param absence: if amount is zero will return it
    @type absence: C{unicode}
    @return: amount with proper variant
    @rtype: C{unicode}
    """
    # Zero amount with an absence text supplied: return the absence text.
    if not amount and absence is not None:
        return absence
    return u"%d %s" % (amount, choose_plural(amount, variants))
|
def toggle_hscrollbar(self, checked):
    """Toggle the horizontal scrollbar.

    :param checked: whether the horizontal scrollbar should be shown
    """
    # Persist the option and notify listeners.
    self.parent_widget.sig_option_changed.emit('show_hscrollbar', checked)
    self.show_hscrollbar = checked
    # When the scrollbar is shown the last section must not stretch,
    # otherwise there is nothing to scroll horizontally.
    self.header().setStretchLastSection(not checked)
    self.header().setHorizontalScrollMode(QAbstractItemView.ScrollPerPixel)
    try:
        self.header().setSectionResizeMode(QHeaderView.ResizeToContents)
    except:  # support for qtpy<1.2.0 (older API name)
        self.header().setResizeMode(QHeaderView.ResizeToContents)
|
def arguments(self, args=None):
    '''Read in arguments for the current subcommand. These are added to the
    cmd line without '--' appended. Any others are redirected as standard
    options with the double hyphen prefixed.'''
    # Work on a mutable copy so elements can be consumed one by one.
    args = list(args) if args else []
    if self.subcmd == 'apply':
        # The apply subcommand requires a manifest file to execute;
        # consume it as the sole subcommand argument.
        self.subcmd_args = [args.pop(0)]
    if self.subcmd == 'agent':
        # The agent subcommand takes no arguments; run in test mode.
        args.append('test')
    # Whatever remains becomes the standard option list.
    self.args = args
|
def fix_germline_samplename(in_file, sample_name, data):
    """Replace germline sample names, originally from normal BAM file.

    Rewrites the VCF header sample name via ``bcftools reheader`` and
    returns the bgzipped, indexed result.

    :param in_file: path to the input (germline) VCF
    :param sample_name: sample name to write into the VCF header
    :param data: sample data dictionary (provides ``config``)
    :return: path to the bgzipped and indexed output VCF
    """
    out_file = "%s-fixnames%s" % utils.splitext_plus(in_file)
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            # bcftools reheader reads the new sample name(s) from a file,
            # one name per line.
            sample_file = "%s-samples.txt" % utils.splitext_plus(tx_out_file)[0]
            with open(sample_file, "w") as out_handle:
                out_handle.write("%s\n" % sample_name)
            # NOTE: the command template is filled via locals(), so the
            # local variable names above must match its placeholders.
            cmd = ("bcftools reheader -s {sample_file} {in_file} -o {tx_out_file}")
            do.run(cmd.format(**locals()), "Fix germline samplename: %s" % sample_name)
    return vcfutils.bgzip_and_index(out_file, data["config"])
|
def find_index(array, x):
    """Locate the leftmost value exactly equal to ``x``.

    :type array: list
    :param array: a *sorted* iterable object that supports indexing
    :param x: a comparable value
    :return: index of the first element equal to ``x``
    :raises ValueError: if ``x`` is not present in ``array``

    (Translated from the original Chinese docstring: return the index of
    the first element whose value equals x.)
    """
    i = bisect.bisect_left(array, x)
    if i != len(array) and array[i] == x:
        return i
    # Include the missing value in the message instead of a bare ValueError.
    raise ValueError("%r is not in the array" % (x,))
|
def drop_table(self, table_name):
    """Drop ``table_name`` from the database if it exists.

    :param str table_name: Table name to drop.
    :raises simplesqlite.NullDatabaseConnectionError:
        |raises_check_connection|
    :raises IOError: |raises_write_permission|
    """
    self.validate_access_permission(["w", "a"])
    # Never drop SQLite's own system tables; silently ignore the request.
    if table_name in SQLITE_SYSTEM_TABLES:
        return
    if not self.has_table(table_name):
        return
    self.execute_query(
        "DROP TABLE IF EXISTS '{:s}'".format(table_name),
        logging.getLogger().findCaller(),
    )
    self.commit()
|
def _wrap_decorator ( decorator , targets , is_valid_target ) :
"""Wraps given decorator in order to provide additional functionality :
optional arguments and verification of decorator ' s target type .
: param decorator : Decorator callable
: param targets : Name of the decorator targets , as plural
( used in error messages )
: param is _ valid _ target : Callable for checking
whether decorator ' s target is valid
: return : Wrapped ` ` decorator ` `"""
|
@ functools . wraps ( decorator )
def wrapper ( * args , ** kwargs ) : # handle the case when decorator is applied as ` ` @ decorator ` `
# ( without any parameters and parentheses )
one_arg = len ( args ) == 1 and not kwargs
if one_arg and is_valid_target ( args [ 0 ] ) :
actual_decorator = decorator ( )
return actual_decorator ( args [ 0 ] )
# pass the parameters to decorator callable
# to get the actual decorator that can be applied to targets
actual_decorator = decorator ( * args , ** kwargs )
# TODO ( xion ) : The above raises TypeError with confusing message
# ( " < class > . _ _ new _ _ ( ) takes no parameters " ) when function decorator
# is applied to a class . See if we can detect that and do better .
# wrap it inside a function that verifies
# whether a target that user has given is valid for this decorator
def decorator_wrapper ( target ) :
if not is_valid_target ( target ) :
raise TypeError ( "@%s can only be applied to %s: got %r instead" % ( decorator . __name__ , targets , type ( target ) ) )
return actual_decorator ( target )
return decorator_wrapper
return wrapper
|
def add_pathway(self, pathway):
    """Register ``pathway`` and return its index.

    Updates ``self.pathways`` and ``self.n_pathways`` when the pathway
    has not been seen before.

    Parameters
    ----------
    pathway : str
        the pathway to add to the network.
    """
    known = self.pathways
    if pathway not in known:
        # Assign the next free index and grow the counter.
        known[pathway] = self.n_pathways
        self.n_pathways += 1
    return known[pathway]
|
def member_update(self, member_id, params):
    """Update a member's values and reconfigure the replica set.

    Args:
        member_id - member index
        params - updates member params

    return True if operation success otherwise False
    """
    updated_config = self.config
    rs_params = params.get("rsParams", {})
    updated_config['members'][member_id].update(rs_params)
    return self.repl_update(updated_config)
|
def get_files(dir, exts, exclude=None, recursive=False):
    """Collect files under ``dir`` whose names end with one of ``exts``.

    :param dir: String root directory to search under.
    :param exts: List of string file extensions (or, more generally, name
        suffixes) that a file must end with to be included.
    :param exclude: List of strings specifying directories that should not
        be included in the output list.
    :param recursive: When True, search in all subdirectories, otherwise
        just look in the current directory.
    :return: List of string file paths.
    """
    if not recursive:
        return list(get_files_in_dir(dir, *exts))

    matches = []
    for root, _, _ in os.walk(dir):
        # os.walk() does not append a path separator to directory names.
        root = add_suffix(root, get_path_separator())
        # Skip any directory found on the exclude (black) list.
        if exclude is not None and is_dir_inside(root, exclude):
            continue
        matches.extend(get_files_in_dir(root, *exts))
    return matches
|
def swap_default(mode, equation, symbol_names, default, **kwargs):
    '''Given a `sympy` equation or equality, along with a list of symbol
    names, substitute the specified default value for each symbol for which
    a value is not provided through a keyword argument.

    :param mode: one of ``'subs'``, ``'limit'`` or ``'limit_default'``,
        selecting whether explicit values and defaults are substituted
        (``_subs``) or applied as limits (``_limit``).
    :param equation: `sympy` expression or equality to operate on.
    :param symbol_names: names of the symbols eligible for swapping.
    :param default: value swapped in for every symbol that has no explicit
        keyword-argument override.
    :param kwargs: per-symbol overriding values. Keyword arguments for
        symbols not listed in ``symbol_names`` are ignored, and iterable
        override values are skipped entirely.
    :return: the resulting expression.
    :raises ValueError: if ``mode`` is not one of the supported modes.
    '''
    if mode == 'subs':
        swap_f = _subs
        default_swap_f = _subs
    elif mode == 'limit':
        swap_f = _limit
        default_swap_f = _subs
    elif mode == 'limit_default':
        swap_f = _subs
        default_swap_f = _limit
    else:
        # BUG FIX: the previous message omitted 'limit_default' even
        # though it is an accepted mode.
        raise ValueError("Unsupported mode. `mode` must be one of: "
                         "('subs', 'limit', 'limit_default').")
    result = equation
    for s in symbol_names:
        if s in kwargs:
            # Iterable overrides (e.g. sweep values) are left untouched.
            if isinstance(kwargs[s], Iterable):
                continue
            result = swap_f(result, s, kwargs[s])
        else:
            result = default_swap_f(result, s, default)
    return result
|
def serialize_for_header(key, value):
    """Serialize ``value`` for mapping key ``key`` in a VCF header line."""
    # Fields on the quote list are always JSON-encoded.
    if key in QUOTE_FIELDS:
        return json.dumps(value)
    if isinstance(value, str):
        # Strings containing whitespace must be quoted; others pass through.
        return json.dumps(value) if (" " in value or "\t" in value) else value
    if isinstance(value, list):
        return "[{}]".format(", ".join(value))
    return str(value)
|
def loop(self):
    """Check for pending mails, send them, and purge old finished ones."""
    # Send every mail that is not done and has failed fewer than 3 times.
    pending = Mail.objects.filter(done=False, send_fail_count__lt=3)
    for mail in pending:
        mail.send_mail()
    # Remove finished mails older than one week.
    one_week_ago = time() - 60 * 60 * 24 * 7
    for stale in Mail.objects.filter(done=True, timestamp__lt=one_week_ago):
        stale.delete()
    return 1, None
|
def execute(self, vertices_substitution_dict=None):
    """Execute the stored code.

    :param vertices_substitution_dict: aliases of the variables in the code
    :return: the value bound to ``result`` by the executed code, or ``True``
        when there is no code or the code did not set a result
        (default is True)
    """
    if not self.code_strings:
        return True
    # ``None`` default instead of a shared mutable ``{}`` default; the
    # truthiness check below behaves identically for both.
    if vertices_substitution_dict:
        namespace = self.__substitute_names_in_namespace(self.namespace, vertices_substitution_dict)
    else:
        namespace = self.namespace
    try:
        self.__execute_code(self._compiled_ast_and_expr, namespace)
    except Exception:
        # Best-effort execution: a failing snippet must not break the
        # caller. ``Exception`` (not bare ``except:``) so that
        # KeyboardInterrupt/SystemExit still propagate.
        pass
    # Fall back to the documented default instead of raising KeyError
    # when the code failed before setting ``result``.
    return namespace.get('result', True)
|
def plot_tree(booster, num_trees=0, rankdir='UT', ax=None, **kwargs):
    """Plot the specified tree on a matplotlib axes.

    Parameters
    ----------
    booster : Booster, XGBModel
        Booster or XGBModel instance
    num_trees : int, default 0
        Specify the ordinal number of target tree
    rankdir : str, default "UT"
        Passed to graphviz via graph_attr
    ax : matplotlib Axes, default None
        Target axes instance. If None, new figure and axes will be created.
    kwargs :
        Other keywords passed to to_graphviz

    Returns
    -------
    ax : matplotlib Axes
    """
    try:
        import matplotlib.pyplot as plt
        import matplotlib.image as image
    except ImportError:
        raise ImportError('You must install matplotlib to plot tree')

    if ax is None:
        _, ax = plt.subplots(1, 1)
    graph = to_graphviz(booster, num_trees=num_trees, rankdir=rankdir, **kwargs)
    # Render the graph to an in-memory PNG and draw it on the axes.
    png_buffer = BytesIO(graph.pipe(format='png'))
    ax.imshow(image.imread(png_buffer))
    ax.axis('off')
    return ax
|
def checkCorpNums(self, MemberCorpNum, CorpNumList):
    """Bulk business-closure status lookup, up to 1000 entries.

    args
        MemberCorpNum : Popbill member's business registration number
        CorpNumList : list of business registration numbers to query
    return
        closure-status information objects as a list
    raise
        PopbillException
    """
    # ``is None`` identity check instead of ``== None`` (PEP 8).
    if CorpNumList is None or len(CorpNumList) < 1:
        raise PopbillException(-99999999, "조죄할 사업자번호 목록이 입력되지 않았습니다.")
    postData = self._stringtify(CorpNumList)
    return self._httppost('/CloseDown', postData, MemberCorpNum)
|
def assemble_pairs(p, pf, tag, target=None):
    """Take one pair of reads and assemble to contigs.fasta.

    :param p: read pair to link into the work directory
    :param pf: prefix of the assembly directory
    :param tag: tag used when symlinking the reads
    :param target: make targets to build; defaults to
        ``["final.contigs.fasta"]``. Uses a ``None`` sentinel to avoid
        the shared-mutable-default-argument pitfall.
    """
    if target is None:
        target = ["final.contigs.fasta"]
    slink(p, pf, tag)
    assemble_dir(pf, target)
|
def login(self, verbose=False):
    '''Authenticate with Opsview.

    :param verbose: Verbose output mode
    :type verbose: bool
    :return: The authentication token
    :rtype: str or unicode
    '''
    url = '{}/{}'.format(self.rest_url, 'login')
    logger.debug('POST: {}'.format(url))
    credentials = {'username': self.username, 'password': self.password}
    response = requests.post(url, json=credentials, verify=self.verify_ssl)
    body = response.json()
    logger.debug('Request response:')
    logger.debug(pformat(vars(response)))
    logger.debug('JSON:')
    logger.debug(pformat(body))
    if 'token' not in body:
        raise OpsviewLoginException("Failed to retrieve token. "
                                    "Please check your credentials")
    token = body['token']
    # Remember the token and when it was obtained for later expiry checks.
    self.headers['X-Opsview-Token'] = token
    self._token_age = datetime.datetime.now()
    return token
|
def get_position(self):
    """Return the robot's current field position as ``(x, y, angle)``.

    ``x`` and ``y`` are in feet, ``angle`` is in radians. The read is
    performed under the lock so the three values form a consistent snapshot.
    """
    with self._lock:
        return (self.x, self.y, self.angle)
|
def piecewise(target, throat_endpoints='throat.endpoints', throat_centroid='throat.centroid'):
    r"""Calculate throat length from end points and optionally a centroid

    Parameters
    ----------
    target : OpenPNM Object
        The object which this model is associated with. This controls the
        length of the calculated array, and also provides access to other
        necessary properties.
    throat_endpoints : string
        Dictionary key of the throat endpoint values.
    throat_centroid : string
        Dictionary key of the throat centroid values, optional.

    Returns
    -------
    Lt : ndarray
        Array containing throat lengths for the given geometry.

    Notes
    -----
    (1) By default, the model assumes that the centroids of pores and the
    connecting throat in each conduit are colinear.
    (2) If `throat_centroid` is passed, the model accounts for the extra
    length. This could be useful for Voronoi or extracted networks.
    """
    import warnings
    network = target.project.network
    throats = network.map_throats(throats=target.Ts, origin=target)
    # Get throat endpoints
    EP1 = network[throat_endpoints + '.head'][throats]
    EP2 = network[throat_endpoints + '.tail'][throats]
    # Suppress RuntimeWarnings only within this computation.
    # ``warnings.catch_warnings`` restores the previous filter state on
    # exit, unlike the old pair of ``_np.warnings.filterwarnings`` calls
    # which reset filters to 'default' (and ``numpy.warnings`` has been
    # removed from recent NumPy releases).
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', category=RuntimeWarning)
        # Straight-line length, assuming colinear pore/throat centroids.
        Lt = _sqrt(((EP1 - EP2) ** 2).sum(axis=1))
        # Handle the case where pores & throat centroids are not colinear.
        try:
            Ct = network[throat_centroid][throats]
            Lt = _sqrt(((Ct - EP1) ** 2).sum(axis=1)) + _sqrt(((Ct - EP2) ** 2).sum(axis=1))
        except KeyError:
            pass
    return Lt
|
def string_sanitize(string, tab_width=8):
    r"""Strip carriage returns and expand tabs to spaces.

    :param tab_width: number of spaces to replace tabs with. Read from
        `globals.tabwidth` setting if `None`
    :type tab_width: int or `None`

    >>> string_sanitize('foo\rbar', 8)
    'foobar'
    >>> string_sanitize('foo\tbar', 8) == 'foo' + ' ' * 5 + 'bar'
    True
    >>> string_sanitize('foo\t\tbar', 8) == 'foo' + ' ' * 13 + 'bar'
    True
    """
    cleaned = string.replace('\r', '')
    out_lines = []
    for line in cleaned.split('\n'):
        if '\t' not in line:
            out_lines.append(line)
            continue
        pieces = line.split('\t')
        last = len(pieces) - 1
        expanded = []
        column = 0
        for idx, piece in enumerate(pieces):
            expanded.append(piece)
            column += len(piece)
            if idx < last:
                # Pad with spaces up to the next multiple of tab_width.
                pad = tab_width - (column % tab_width)
                expanded.append(' ' * pad)
                column += pad
        out_lines.append(''.join(expanded))
    return '\n'.join(out_lines)
|
def set_locale(request):
    """Return the locale from the GET ``lang`` parameter, or pick one
    automatically from the request.

    The automatic fallback is only computed when ``lang`` is absent; the
    previous ``dict.get(key, default)`` form evaluated the (potentially
    costly) automatic lookup on every call, even when ``lang`` was given.
    """
    lang = request.query.get('lang')
    if lang is not None:
        return lang
    return app.ps.babel.select_locale_by_request(request)
|
def imread(img, color=None, dtype=None):
    '''Read an image with OpenCV from a path, array or callable.

    :param img: file path, numpy array, or a callable returning the image
    :param color: one of ``None`` (any color), ``'gray'`` or ``'all'``
    :param dtype: 'noUint', uint8, float, 'float', ...; when given, the
        returned array is converted to this dtype
    :return: the image as a numpy array
    :raises IOError: if ``img`` is a path and the file cannot be read
    '''
    # Map the ``color`` argument to the matching OpenCV imread flag.
    COLOR2CV = {'gray': cv2.IMREAD_GRAYSCALE,
                'all': cv2.IMREAD_COLOR,
                None: cv2.IMREAD_ANYCOLOR}
    c = COLOR2CV[color]
    if callable(img):
        # Lazily-produced image: call to obtain the actual array.
        img = img()
    elif isinstance(img, string_types):
        # from_file=True
        # try:
        #     ftype = img[img.find('.'):]
        #     img = READERS[ftype](img)[0]
        # except KeyError:
        # open with openCV
        # grey - 8 bit
        if dtype in (None, "noUint") or np.dtype(dtype) != np.uint8:
            # Preserve native bit depth unless uint8 was explicitly wanted.
            c |= cv2.IMREAD_ANYDEPTH
        img2 = cv2.imread(img, c)
        if img2 is None:
            raise IOError("image '%s' is not existing" % img)
        img = img2
    elif color == 'gray' and img.ndim == 3:
        # multi channel img like rgb
        # cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # cannot handle float64
        img = toGray(img)
    # transform array to uint8 array due to openCV restriction
    if dtype is not None:
        if isinstance(img, np.ndarray):
            img = _changeArrayDType(img, dtype, cutHigh=False)
    return img
|
def wallet_representative_set(self, wallet, representative):
    """Set the default **representative** account for **wallet**.

    .. enable_control required

    :param wallet: Wallet to set default representative account for
    :type wallet: str
    :param representative: Representative account to set for **wallet**
    :type representative: str
    :raises: :py:exc:`nano.rpc.RPCException`
    """
    payload = {
        "wallet": self._process_value(wallet, 'wallet'),
        "representative": self._process_value(representative, 'account'),
    }
    response = self.call('wallet_representative_set', payload)
    return response['set'] == '1'
|
def print_summary(self, decimals=2, **kwargs):
    """Print summary statistics describing the fit, the coefficients,
    and the error bounds.

    Parameters
    ----------
    decimals : int, optional (default=2)
        specify the number of decimal places to show
    kwargs :
        print additional metadata in the output (useful to provide model
        names, dataset names, etc.) when comparing multiple outputs.
    """
    justify = string_justify(18)
    print(self)
    n_subjects = self.event_observed.shape[0]
    n_events = np.where(self.event_observed)[0].shape[0]
    print("{} = {}".format(justify("number of subjects"), n_subjects))
    print("{} = {}".format(justify("number of events"), n_events))
    print("{} = {:.{prec}f}".format(justify("log-likelihood"), self._log_likelihood, prec=decimals))
    hypotheses = ", ".join(
        "%s != %d" % (name, iv)
        for (name, iv) in zip(self._fitted_parameter_names, self._initial_values)
    )
    print("{} = {}".format(justify("hypothesis"), hypotheses))
    for key, value in kwargs.items():
        print("{} = {}\n".format(justify(key), value))
    print(end="\n")
    print("---")
    print(self.summary.to_string(
        float_format=format_floats(decimals),
        formatters={"p": format_p_value(decimals), "exp(coef)": format_exp_floats(decimals)},
    ))
|
async def delete_reply_markup(self):
    """Delete the reply markup of a message sent by (or via) the bot.

    :return: On success, if the edited message was sent by the bot, the
        edited Message is returned, otherwise True is returned.
    :rtype: :obj:`typing.Union[types.Message, base.Boolean]`
    """
    chat_id = self.chat.id
    return await self.bot.edit_message_reply_markup(chat_id=chat_id, message_id=self.message_id)
|
def bulk_delete(self, ids=None, filter=None, type=None, all=False):  # pylint: disable=redefined-builtin
    """Bulk delete a set of configs.

    :param ids: (optional) Int list of config IDs.
    :param filter: (optional) String list of filters.
    :param type: (optional) `union` or `inter` as string.
    :param all: (optional) Apply to all if bool `True`.
    """
    return self.service.bulk_delete(
        self.base,
        self.RESOURCE,
        ids=ids,
        filter=filter,
        type=type,
        all=all,
    )
|
def contains_call_signature(caller, key):
    """Check whether ``key`` appears in the call signature of ``caller``.

    Args:
        caller (Callable):
            Method or function whose signature is inspected.
        key (str):
            Argument name to look for.

    Returns:
        True if ``key`` exists in ``caller``'s call signature.

    Examples:
        >>> def foo(param): pass
        >>> contains_call_signature(foo, "param")
        True
        >>> contains_call_signature(foo, "not_param")
        False
    """
    # Prefer ``inspect.signature``; fall back to ``getargspec`` on
    # interpreters that do not provide it.
    try:
        parameters = inspect.signature(caller).parameters
    except AttributeError:
        parameters = inspect.getargspec(caller).args
    return key in parameters
|
def getBottomRight(self):
    """Return the ``Location`` of the bottom right corner of this region."""
    corner_x = self.x + self.w
    corner_y = self.y + self.h
    return Location(corner_x, corner_y)
|
def reset(ctx):
    """Reset all PIV data.

    This action will wipe all data and restore factory settings for
    the PIV application on your YubiKey.
    """
    click.echo('Resetting PIV data...')
    ctx.obj['controller'].reset()
    # Report the restored factory defaults to the user.
    for line in (
            'Success! All PIV data have been cleared from your YubiKey.',
            'Your YubiKey now has the default PIN, PUK and Management Key:',
            '\tPIN:\t123456',
            '\tPUK:\t12345678',
            '\tManagement Key:\t010203040506070801020304050607080102030405060708'):
        click.echo(line)
|
def extract_specific_interval(self, interval_start, interval_end):
    """Extract the samples between two indices from the time series.

    Overload if special behaviour is required when a series ends.

    :param interval_start: inclusive start index into the time series
    :param interval_end: exclusive end index into the time series
    :return: array of samples for the requested interval; when the request
        runs past the end of the series it either wraps around
        (``self.repeat``) or is zero-padded to a full interval length
    :raises ValueError: if the interval is empty/inverted, or if it starts
        past the end of a non-repeating series
    """
    interval_start = int(interval_start)
    interval_end = int(interval_end)
    if interval_start >= interval_end:
        raise ValueError("Requested interval's start point is past the requested end point.")
    elif interval_start > self.time_series.size:
        if self.repeat:
            # Wrap: shift the request back to the start of the series.
            # NOTE(review): only the requested length is preserved here,
            # not the phase (start resets to 0, not start % size) --
            # confirm this is intended.
            interval_end = interval_end - interval_start
            interval_start = 0
        else:
            raise ValueError("Requested interval's start point is past the end of the time series.")
    if interval_end < self.time_series.size:
        # Entirely inside the series: a plain slice suffices.
        interval = self.time_series[interval_start:interval_end]
    else:
        requested_interval_size = interval_end - interval_start
        remaining_signal = self.time_series[interval_start:self.time_series.size]
        if self.repeat:
            if requested_interval_size < self.time_series.size:
                # Wrap once: tail of the series plus a slice of its head.
                self._next_interval_start = requested_interval_size - remaining_signal.size
                interval = np.hstack((remaining_signal, self.time_series[0:self._next_interval_start]))
            else:
                # Request longer than the whole series: tile it as needed.
                repeated_signal = np.repeat(self.time_series, np.floor(requested_interval_size / self.time_series.size))
                self._next_interval_start = requested_interval_size % self.time_series.size
                interval = (np.hstack((remaining_signal, repeated_signal)))[0:requested_interval_size]
        else:
            # Non-repeating series exhausted: zero-pad to a full interval.
            self.warning("Returning last interval of the time series.")
            self._next_interval_start = self.time_series.size + 1
            samples_per_interval = self.interval_length * self.sample_rate
            interval = np.hstack((remaining_signal, np.zeros(samples_per_interval - remaining_signal.size)))
    return interval
|
def to_ufo_glyph_background(self, glyph, layer):
    """Copy a layer's background into the glyph's UFO background layer."""
    if not layer.hasBackground:
        return
    bg = layer.background
    bg_layer = self.to_ufo_background_layer(glyph)
    bg_glyph = bg_layer.newGlyph(glyph.name)
    # The background stores its advance width in user data, if at all.
    bg_width = bg.userData[BACKGROUND_WIDTH_KEY]
    if bg_width is not None:
        bg_glyph.width = bg_width
    self.to_ufo_background_image(bg_glyph, bg)
    self.to_ufo_paths(bg_glyph, bg)
    self.to_ufo_components(bg_glyph, bg)
    self.to_ufo_glyph_anchors(bg_glyph, bg.anchors)
    self.to_ufo_guidelines(bg_glyph, bg)
|
def _make_padded_ramp ( self , v1 , v2 , pad_velocity , total_time ) :
"""Makes a ramp that looks like this :
v1 \ _ _ _ _ _ pad _ velocity
| | | | \v 2
t1 tp t2
Such that whole section takes total _ time"""
|
# The time taken to ramp from v1 to pad _ velocity
t1 = self . acceleration_time ( v1 , pad_velocity )
# Then on to v2
t2 = self . acceleration_time ( pad_velocity , v2 )
# The distance during the pad
tp = total_time - t1 - t2
# Yield the points
yield t1 , pad_velocity
yield tp , pad_velocity
yield t2 , v2
|
def delete_pre_shared_key(self, endpoint_name, **kwargs):  # noqa: E501
    """Remove a pre-shared key.  # noqa: E501

    **Example usage:** ``curl -H "authorization: Bearer ${API_TOKEN}" -X DELETE https://api.us-east-1.mbedcloud.com/v2/device-shared-keys/my-endpoint-0001``  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.delete_pre_shared_key(endpoint_name, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str endpoint_name: The unique endpoint identifier that this pre-shared key applies to. [Reserved characters](https://en.wikipedia.org/wiki/Percent-encoding#Percent-encoding_reserved_characters) must be percent-encoded. (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both paths delegate to the *_with_http_info variant and return its
    # result: the response data for synchronous calls, or the request
    # thread when ``asynchronous=True`` is passed along in kwargs.
    return self.delete_pre_shared_key_with_http_info(endpoint_name, **kwargs)  # noqa: E501
|
def createModel(self, model, context, owner='', includeReferences=True):
    """Create a new table in the database based off the inputted schema
    information. If the dryRun flag is specified, the SQL is only
    printed, and not actually executed in the database.

    :param model: <orb.Model>
    :param context: <orb.Context>
    :param owner: optional owner for the created table
    :param includeReferences: whether reference columns are included
    :return: <bool> success
    """
    CREATE = self.statement('CREATE')
    sql, data = CREATE(model, includeReferences=includeReferences, owner=owner)
    if not sql:
        log.error('Failed to create {0}'.format(model.schema().dbname()))
        return False
    if context.dryRun:
        # BUG FIX: this was a Python 2 ``print`` statement, which is a
        # SyntaxError in Python 3 (the rest of the file already uses the
        # print() function).
        print(sql % data)
    else:
        self.execute(sql, data, writeAccess=True)
    log.info('Created {0}'.format(model.schema().dbname()))
    return True
|
def merchant_support_email(self, merchant_support_email):
    """Sets the merchant_support_email of this CreateCheckoutRequest.

    The email address to display on the Square Checkout confirmation page
    and confirmation email that the buyer can use to contact the merchant.
    If this value is not set, the confirmation page and email will display
    the primary email address associated with the merchant's Square
    account. Default: none; only exists if explicitly set.

    :param merchant_support_email: The merchant_support_email of this CreateCheckoutRequest.
    :type: str
    """
    value = merchant_support_email
    # Reject missing values first so the length check below is safe.
    if value is None:
        raise ValueError("Invalid value for `merchant_support_email`, must not be `None`")
    if len(value) > 254:
        raise ValueError("Invalid value for `merchant_support_email`, length must be less than `254`")
    self._merchant_support_email = value
|
def read(self, table_name, limit=None, decode_geom=False, shared_user=None, retry_times=3):
    """Read a table from CARTO into a pandas DataFrame.

    Args:
        table_name (str): Name of table in user's CARTO account.
        limit (int, optional): Read only `limit` lines from `table_name`.
            Defaults to ``None``, which reads the full table.
        decode_geom (bool, optional): Decodes CARTO's geometries into a
            `Shapely <https://github.com/Toblerity/Shapely>`__ object that
            can be used, for example, in `GeoPandas
            <http://geopandas.org/>`__.
        shared_user (str, optional): If a table has been shared with you,
            specify the user name (schema) who shared it.
        retry_times (int, optional): If the read call is rate limited,
            number of retries to be made.

    Returns:
        pandas.DataFrame: DataFrame representation of `table_name` from
        CARTO.

    Example:
        .. code:: python

            import cartoframes
            cc = cartoframes.CartoContext(BASEURL, APIKEY)
            df = cc.read('acadia_biodiversity')
    """
    # Resolve the schema: 'public' for standalone accounts; otherwise the
    # sharing user's schema, or the credential owner's as the default.
    if not self.is_org:
        schema = 'public'
    else:
        schema = shared_user or self.creds.username()
    dataset = Dataset(self, table_name, schema)
    return dataset.download(limit, decode_geom, retry_times)
|
def _check_arguments(self):
    """Verify the arguments of the default symbol and user-provided
    parameters, pruning parameters the symbol does not declare."""
    if self.argument_checked:
        return
    assert self.symbol is not None
    self.argument_checked = True
    # Check whether the symbol contains duplicated names
    # (delegates to the module-level helper of the same name).
    _check_arguments(self.symbol)
    if not self.allow_extra_params:
        return
    # Re-match parameters to drop entries the symbol does not use.
    if self.arg_params:
        valid_args = set(self.symbol.list_arguments())
        self.arg_params = {name: value for name, value in self.arg_params.items() if name in valid_args}
    if self.aux_params:
        valid_aux = set(self.symbol.list_auxiliary_states())
        self.aux_params = {name: value for name, value in self.aux_params.items() if name in valid_aux}
|
def window_size(self, config, now):
    """Return the total elapsed time (ms) covered by the sample windows.

    Here we check the total amount of time elapsed since the oldest
    non-obsolete window. This gives the total window_size of the batch,
    which is the time used for Rate computation. However, there is an
    issue if we do not have sufficient data: e.g. if only 1 second has
    elapsed in a 30 second window, the measured rate will be very high.
    Hence we assume that the elapsed time is always N-1 complete windows
    plus whatever fraction of the final window is complete.

    Note that we could simply count the amount of time elapsed in the
    current window and add n-1 windows to get the total time, but this
    approach does not account for sleeps. AbstractSampledStat only
    creates samples whenever record is called; if no record is called
    for a period of time, that time is not accounted for in window_size
    and produces incorrect results.

    (This explanation previously sat *after* the first statement as a
    bare string expression, so it was a no-op rather than a docstring.)
    """
    # Purge old samples before we compute the window size.
    self._stat.purge_obsolete_samples(config, now)
    total_elapsed_time_ms = now - self._stat.oldest(now).last_window_ms
    # Check how many full windows of data we have currently retained.
    num_full_windows = int(total_elapsed_time_ms / config.time_window_ms)
    min_full_windows = config.samples - 1
    # If the available windows are less than the minimum required,
    # add the difference to the total elapsed time.
    if num_full_windows < min_full_windows:
        total_elapsed_time_ms += ((min_full_windows - num_full_windows) * config.time_window_ms)
    return total_elapsed_time_ms
|
def list_default_vsan_policy(service_instance=None):
    '''Return the default VSAN storage policy.

    service_instance
        Service instance (vim.ServiceInstance) of the vCenter.
        Default is None.

    .. code-block:: bash

        salt '*' vsphere.list_storage_policies
        salt '*' vsphere.list_storage_policy policy_names=[policy_name]
    '''
    profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
    all_policies = salt.utils.pbm.get_storage_policies(profile_manager, get_all_policies=True)
    # The default VSAN policy is the system-created 'VsanDefaultProfile'.
    defaults = [policy for policy in all_policies
                if policy.systemCreatedProfileType == 'VsanDefaultProfile']
    if not defaults:
        raise VMwareObjectRetrievalError('Default VSAN policy was not '
                                         'retrieved')
    return _get_policy_dict(defaults[0])
|
def format_timestamp(t):
    """Cast ``t`` to a pandas Timestamp and return a concise string.

    Pure dates render as ``YYYY-MM-DD``; anything with a non-midnight time
    renders as ``YYYY-MM-DDTHH:MM:SS``. Values outside the Timestamp range
    (valid ~1678 to 2262) fall back to ``str(t)``; values that do not split
    into date and time parts (e.g. NaT) are returned as-is.
    """
    try:
        text = str(pd.Timestamp(t))
    except OutOfBoundsDatetime:
        text = str(t)
    parts = text.split()
    if len(parts) != 2:
        # NaT and similar values do not split into date and time.
        return text
    date_part, time_part = parts
    if time_part == '00:00:00':
        return date_part
    return '{}T{}'.format(date_part, time_part)
|
def XYZ_to_IPT(cobj, *args, **kwargs):
    """Convert XYZ to IPT.

    NOTE: XYZ values need to be adapted to 2 degree D65.

    Reference:
    Fairchild, M.D. (2013). Color appearance models, 3rd Ed. (pp. 271-272).
    John Wiley & Sons.
    """
    if cobj.illuminant != 'd65' or cobj.observer != '2':
        raise ValueError('XYZColor for XYZ->IPT conversion needs to be D65 adapted.')
    matrices = IPTColor.conversion_matrices
    xyz = numpy.array(cobj.get_value_tuple())
    lms = numpy.dot(matrices['xyz_to_lms'], xyz)
    # Sign-preserving nonlinear compression of the LMS response.
    lms_prime = numpy.sign(lms) * numpy.abs(lms) ** 0.43
    ipt = numpy.dot(matrices['lms_to_ipt'], lms_prime)
    return IPTColor(*ipt)
|
def delete(self, crash):
    """Remove the given crash dump from the database.

    @type  crash: L{Crash}
    @param crash: Crash dump to remove.
    """
    (self._session.query(CrashDTO)
         .filter_by(id=crash._rowid)
         .delete(synchronize_session=False))
    # The row is gone; drop the stale id from the in-memory object too.
    del crash._rowid
|
def get_fields(self, request, obj=None):
    """Hide the raw value field (readonly) when adding a new item."""
    visible_fields = self.fields[:]
    if not obj:
        # On the add form there is no stored raw value to show yet.
        visible_fields.remove('value')
    return visible_fields
|
def _fetch_dimensions ( self , dataset ) :
    """We override this method just to set the correct datatype
    and dialect for regions."""
    for dim in super ( SCB , self ) . _fetch_dimensions ( dataset ) :
        if dim . id != "Region" :
            yield dim
        else :
            # Regions get the dedicated datatype/dialect so values resolve correctly.
            yield Dimension ( dim . id , datatype = "region" , dialect = "skatteverket" , label = dim . label )
|
def cdl_add ( self , source_url , save_path = '/' , timeout = 3600 ) :
    '''Usage: cdl_add <source_url> [save_path] [timeout] - add an offline (cloud) download task
    source_url - the URL to download file from.
    save_path - path on PCS to save file to. default is to save to root directory '/'.
    timeout - timeout in seconds. default is 3600 seconds.'''
    # Resolve the remote destination first, then queue the download task.
    remote_path = self . __get_cdl_dest ( source_url , save_path )
    return self . __cdl_add ( source_url , remote_path , timeout )
|
def cli ( env , identifier , label , note ) :
    """Edits an SSH key."""
    manager = SoftLayer . SshKeyManager ( env . client )
    # Resolve a name or partial id to a concrete key id.
    key_id = helpers . resolve_id ( manager . resolve_ids , identifier , 'SshKey' )
    updated = manager . edit_key ( key_id , label = label , notes = note )
    if not updated :
        raise exceptions . CLIAbort ( 'Failed to edit SSH key' )
|
def check_subprocess ( self ) :
    """Make sure the process didn't exit with an error and run the checks.
    :rtype: bool
    :return: the actual check status
    :raise ProcessExitedWithError: when the main process exits with
        an error"""
    status = self . process . poll ( )
    # None means still running; 0 means a clean exit — both are fine.
    if status not in ( None , 0 ) :
        # Main process died with a non-zero code: reap any child processes,
        # drop our handle, then surface the failure to the caller.
        self . _kill_all_kids ( self . _sig_kill )
        self . _clear_process ( )
        raise ProcessExitedWithError ( self , status )
    return self . after_start_check ( )
|
def _ParseVValueString ( self , parser_mediator , data , user_information_descriptor ) :
    """Parses a V value string.
    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      data (bytes): Windows Registry V value data.
      user_information_descriptor (user_information_descriptor): V value
          user information descriptor.
    Returns:
      str: string value stored in the Windows Registry V value data."""
    start = user_information_descriptor . offset + self . _V_VALUE_STRINGS_OFFSET
    end = start + user_information_descriptor . size
    value_data = data [ start : end ]
    try :
        return value_data . decode ( 'utf-16-le' )
    except ( UnicodeDecodeError , UnicodeEncodeError ) as exception :
        # Undecodable byte sequences are substituted rather than failing the parse.
        parser_mediator . ProduceExtractionWarning ( ( 'unable to decode V value string with error: {0!s}. Characters ' 'that cannot be decoded will be replaced with "?" or ' '"\\ufffd".' ) . format ( exception ) )
        return value_data . decode ( 'utf-16-le' , errors = 'replace' )
|
def generate ( basename , xml_list ) :
    '''generate complete MAVLink Java implementation'''
    # Per-dialect outputs: message classes plus enum definitions.
    for dialect in xml_list :
        generate_one ( basename , dialect )
        generate_enums ( basename , dialect )
    # Outputs produced once for the whole set of dialects.
    generate_MAVLinkMessage ( basename , xml_list )
    copy_fixed_headers ( basename , xml_list [ 0 ] )
|
def _set_align_split_size ( data ) :
    """Set useful align_split_size, generating an estimate if it doesn't exist.
    We try to split on larger inputs and avoid too many pieces, aiming for size
    chunks of 5Gb or at most 50 maximum splits.
    The size estimate used in calculations is 20 million reads for ~5Gb.
    For UMI calculations we skip splitting since we're going to align and
    re-align after consensus.
    For CWL runs, we pick larger split sizes to avoid overhead of staging each chunk."""
    # NOTE(review): docstring says "at most 50 maximum splits" but the code
    # below uses max_splits = 100 — confirm which value is intended.
    if cwlutils . is_cwl_run ( data ) :
        target_size = 20  # Gb
        target_size_reads = 80  # million reads
    else :
        target_size = 5  # Gb
        target_size_reads = 20  # million reads
    max_splits = 100  # Avoid too many pieces, causing merge memory problems
    val = dd . get_align_split_size ( data )
    umi_consensus = dd . get_umi_consensus ( data )
    if val is None :
        # No explicit setting: estimate one from input file sizes, unless UMI
        # consensus is in play (splitting is skipped for UMI runs).
        if not umi_consensus :
            total_size = 0  # Gb
            # Use original files if we might have reduced the size of our prepped files
            input_files = data . get ( "files_orig" , [ ] ) if dd . get_save_diskspace ( data ) else data . get ( "files" , [ ] )
            for fname in input_files :
                if os . path . exists ( fname ) :
                    # bytes -> gigabytes
                    total_size += os . path . getsize ( fname ) / ( 1024.0 * 1024.0 * 1024.0 )
            # Only set if we have files and are bigger than the target size
            if total_size > target_size :
                data [ "config" ] [ "algorithm" ] [ "align_split_size" ] = int ( 1e6 * _pick_align_split_size ( total_size , target_size , target_size_reads , max_splits ) )
    elif val :
        # An explicit align_split_size is incompatible with UMI consensus runs.
        assert not umi_consensus , "Cannot set align_split_size to %s with UMI conensus specified" % val
    return data
|
def validate ( self ) :
    """Validates the given Amazon S3 file with :attr:`validators`. If errors
    occur they are appended to :attr:`errors`. If the file is valid and a
    `AWS_UNVALIDATED_PREFIX` config is present, its value will be removed
    from the file key.
    :return: a boolean indicating if the file vas valid."""
    # Run every validator, collecting failures instead of stopping early.
    for check in self . validators :
        try :
            check ( self . obj )
        except ValidationError as exc :
            self . errors . append ( exc . error )
    is_valid = not self . errors
    # Strip the "unvalidated" key prefix only once the file passed all checks.
    if is_valid and self . _has_unvalidated_prefix ( ) :
        self . _move_to_validated ( )
    return is_valid
|
def load_model ( model_path ) :
    """Load a libsvm model from a path on disk.
    This currently supports:
      * C-SVC
      * NU-SVC
      * Epsilon-SVR
      * NU-SVR
    Parameters
    ----------
    model_path: str
        Path on disk where the libsvm model representation is.
    Returns
    -------
    model: libsvm_model
        A model of the libsvm format."""
    # Bail out early when the optional libsvm dependency is missing.
    if not HAS_LIBSVM :
        raise RuntimeError ( 'libsvm not found. libsvm conversion API is disabled.' )
    from svmutil import svm_load_model  # From libsvm
    import os
    if not os . path . exists ( model_path ) :
        raise IOError ( "Expected a valid file path. %s does not exist" % model_path )
    return svm_load_model ( model_path )
|
def _cast_to_type ( self , value ) :
    """Convert the value to a boolean and raise error on failures"""
    # Booleans (and 0/1, which compare equal to False/True) pass straight through.
    if value in ( True , False ) :
        return bool ( value )
    truthy = ( 't' , 'True' , '1' )
    falsy = ( 'f' , 'False' , '0' )
    if value in truthy :
        return True
    if value in falsy :
        return False
    # Anything unrecognised is reported through the field's failure hook.
    self . fail ( 'invalid' , value = value )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.