signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def implement(cls, implementations, for_type=None, for_types=None):
    """Provide a protocol implementation for one or more types.

    Registers every implementation of the protocol's multimethod functions
    and adds each type to the protocol's abstract base class.

    Arguments:
        implementations: A dict of (function, implementation), where each
            function is a multimethod and each implementation is a callable.
        for_type: The concrete type the implementations apply to.
        for_types: Same as for_type, but takes a tuple of types.

    You may not supply both for_type and for_types for obvious reasons.

    Raises:
        ValueError for bad arguments.
        TypeError if not all implementations are provided or if there
            are issues related to polymorphism (e.g. attempting to
            implement a non-multimethod function).
    """
    target_types = cls.__get_type_args(for_type, for_types)
    for concrete_type in target_types:
        cls._implement_for_type(for_type=concrete_type, implementations=implementations)
|
def get_signature_request_list(self, page=1, ux_version=None):
    """Get a list of SignatureRequest objects that you can access.

    Includes SignatureRequests you have sent as well as received, but not
    ones you have only been CCed on.

    Args:
        page (int, optional): Page number of the SignatureRequest list to
            return. Defaults to 1.
        ux_version (int, optional): UX version, either 1 (default) or 2.

    Returns:
        A ResourceList object.
    """
    parameters = {"page": page}
    if ux_version is not None:
        parameters['ux_version'] = ux_version
    request = self._get_request()
    return request.get(self.SIGNATURE_REQUEST_LIST_URL, parameters=parameters)
|
def MaxSpeed(self, speed):
    """Set the driver's maximum speed via three SPI register writes.

    :param speed: raw register byte written (twice) as the speed value.
        Valid range depends on the device -- TODO confirm against datasheet.
    """
    # NOTE(review): the meaning of the command/register bytes below is
    # inferred only from the values and the original comment; confirm
    # against the motor-driver datasheet before relying on this.
    spi.SPI_write(self.CS, [0x07, 0x07])
    # Max Speed setup
    spi.SPI_write(self.CS, [0x00, 0x00])
    spi.SPI_write(self.CS, [speed, speed])
|
def create_tag(version: Union[Version, str], tag_format: Optional[str] = None):
    """Build a tag name for a software version.

    The tag and the version may differ -- that's why this function exists.

    Example:
        | tag        | version (PEP 0440) |
        | v0.9.0     | 0.9.0              |
        | ver1.0.0   | 1.0.0              |
        | ver1.0.0.a0| 1.0.0a0            |

    ``tag_format`` is a ``string.Template`` body with ``$version``,
    ``$major``, ``$minor``, ``$patch`` and ``$prerelease`` placeholders;
    when absent, the public version string is returned unchanged.
    """
    if isinstance(version, str):
        version = Version(version)
    if not tag_format:
        return version.public
    major, minor, patch = version.release
    if version.is_prerelease:
        prerelease = f"{version.pre[0]}{version.pre[1]}"
    else:
        prerelease = ""
    template = Template(tag_format)
    return template.safe_substitute(version=version, major=major, minor=minor, patch=patch, prerelease=prerelease)
|
def _find_reader_dataset(self, dataset_key, **dfilter):
    """Attempt to find a `DatasetID` in the available readers.

    Args:
        dataset_key (str, float, DatasetID):
            Dataset name, wavelength, or a combination of `DatasetID`
            parameters to use in searching for the dataset among the
            available readers.
        **dfilter (list or str): `DatasetID` parameters besides `name`
            and `wavelength` used to filter the available datasets.
            Passed directly to `get_dataset_key` of the readers; see
            that method for more information.
    """
    saw_too_many = False
    for reader_name, reader_instance in self.readers.items():
        try:
            found_id = reader_instance.get_dataset_key(dataset_key, **dfilter)
        except TooManyResults:
            LOG.trace("Too many datasets matching key {} in reader {}".format(dataset_key, reader_name))
            saw_too_many = True
            continue
        except KeyError:
            LOG.trace("Can't find dataset %s in reader %s", str(dataset_key), reader_name)
            continue
        LOG.trace("Found {} in reader {} when asking for {}".format(str(found_id), reader_name, repr(dataset_key)))
        try:
            # We now know the exact DatasetID -- reuse an existing Node if
            # one was already created for it.
            return self.getitem(found_id)
        except KeyError:
            # No node created yet for this id: make one now.
            return Node(found_id, {'reader_name': reader_name})
    if saw_too_many:
        raise TooManyResults("Too many keys matching: {}".format(dataset_key))
|
async def download(resource_url):
    """Download the given resource_url, dispatching on its URL scheme."""
    scheme = resource_url.parsed.scheme
    if scheme in ('git', 'git+https', 'git+http'):
        await download_git(resource_url)
    elif scheme in ('http', 'https'):
        await download_http(resource_url)
    else:
        raise ValueError('Unknown URL scheme: "%s"' % scheme)
|
def ruge_stuben_solver(A, strength=('classical', {'theta': 0.25}), CF='RS', presmoother=('gauss_seidel', {'sweep': 'symmetric'}), postsmoother=('gauss_seidel', {'sweep': 'symmetric'}), max_levels=10, max_coarse=10, keep=False, **kwargs):
    """Create a multilevel solver using Classical AMG (Ruge-Stuben AMG).

    Parameters
    ----------
    A : csr_matrix
        Square matrix in CSR format.
    strength : ['symmetric', 'classical', 'evolution', 'distance',
                'algebraic_distance', 'affinity', 'energy_based', None]
        Method used to determine the strength of connection between unknowns
        of the linear system. Method-specific parameters may be passed in
        using a tuple, e.g. strength=('symmetric', {'theta': 0.25}). If
        strength=None, all nonzero entries of the matrix are considered strong.
    CF : string
        Method used for coarse grid selection (C/F splitting).
        Supported methods are RS, PMIS, PMISc, CLJP, CLJPc, and CR.
    presmoother : string or dict
        Method used for presmoothing at each level. Method-specific parameters
        may be passed in using a tuple, e.g.
        presmoother=('gauss_seidel', {'sweep': 'symmetric'}), the default.
    postsmoother : string or dict
        Postsmoothing method with the same usage as presmoother.
    max_levels : integer
        Maximum number of levels to be used in the multilevel solver.
    max_coarse : integer
        Maximum number of variables permitted on the coarse grid.
    keep : bool
        Flag to indicate keeping extra operators in the hierarchy for
        diagnostics. For example, if True, then strength of connection (C) and
        tentative prolongation (T) are kept.

    Returns
    -------
    ml : multilevel_solver
        Multigrid hierarchy of matrices and prolongation operators.

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg import ruge_stuben_solver
    >>> A = poisson((10,), format='csr')
    >>> ml = ruge_stuben_solver(A, max_coarse=3)

    Notes
    -----
    "coarse_solver" is an optional argument and is the solver used at the
    coarsest grid. The default is a pseudo-inverse. Most simply,
    coarse_solver can be one of ['splu', 'lu', 'cholesky', 'pinv',
    'gauss_seidel', ...]. Additionally, coarse_solver may be a tuple
    (fn, args), where fn is a string such as ['splu', 'lu', ...] or a callable
    function, and args is a dictionary of arguments to be passed to fn.
    See [2001TrOoSc]_ for additional details.

    References
    ----------
    .. [2001TrOoSc] Trottenberg, U., Oosterlee, C. W., and Schuller, A.,
       "Multigrid" San Diego: Academic Press, 2001. Appendix A

    See Also
    --------
    aggregation.smoothed_aggregation_solver, multilevel_solver,
    aggregation.rootnode_solver
    """
    levels = [multilevel_solver.level()]
    # convert A to csr
    if not isspmatrix_csr(A):
        try:
            A = csr_matrix(A)
            warn("Implicit conversion of A to CSR", SparseEfficiencyWarning)
        except BaseException:
            # Deliberately broad: any failure to coerce means A is unusable.
            raise TypeError('Argument A must have type csr_matrix, \
or be convertible to csr_matrix')
    # preprocess A: ensure floating-point dtype and a square shape
    A = A.asfptype()
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')
    levels[-1].A = A
    # Coarsen repeatedly until the level cap or coarse-size target is reached.
    while len(levels) < max_levels and levels[-1].A.shape[0] > max_coarse:
        extend_hierarchy(levels, strength, CF, keep)
    ml = multilevel_solver(levels, **kwargs)
    change_smoothers(ml, presmoother, postsmoother)
    return ml
|
def delete(cls, schedule_id, schedule_instance_id, note_attachment_schedule_instance_id, monetary_account_id=None, custom_headers=None):
    """Delete a note attachment from a schedule instance.

    :type user_id: int
    :type monetary_account_id: int
    :type schedule_id: int
    :type schedule_instance_id: int
    :type note_attachment_schedule_instance_id: int
    :type custom_headers: dict[str, str]|None

    :rtype: BunqResponseNone
    """
    headers = {} if custom_headers is None else custom_headers
    api_client = client.ApiClient(cls._get_api_context())
    endpoint_url = cls._ENDPOINT_URL_DELETE.format(
        cls._determine_user_id(),
        cls._determine_monetary_account_id(monetary_account_id),
        schedule_id,
        schedule_instance_id,
        note_attachment_schedule_instance_id,
    )
    response_raw = api_client.delete(endpoint_url, headers)
    return BunqResponseNone.cast_from_bunq_response(client.BunqResponse(None, response_raw.headers))
|
def parse_cli_args_into():
    """Create the CLI argparser for application specifics and AWS credentials.

    :return: the parsed arguments, collected into a TemplateCommand namespace
    :rtype: TemplateCommand
    """
    cli_arg_parser = argparse.ArgumentParser(parents=[AWSArgumentParser(default_role_session_name='aws-autodiscovery-templater')])
    main_parser = cli_arg_parser.add_argument_group('AWS Autodiscovery Templater')
    main_parser.add_argument('--template-path', help='Path to the template to fill variables into.', required=True)
    # NOTE(review): a required store_true flag must always be passed, which is
    # contradictory; presumably '--stdout' was meant to be one member of a
    # mutually exclusive output group -- confirm intended CLI design.
    main_parser.add_argument('--stdout', help='Prints a json object containing the retrieves resources', action='store_true', default=False, required=True)
    # BUG FIX: this option previously used action='store_true', which turned
    # it into a boolean flag and silently discarded the documented
    # comma-separated list of vpc-ids. It now accepts a value.
    main_parser.add_argument('--vpc-ids', help=('Optionally restrict the filtering to a particular list of IPs. ' 'Comma seperated list of vpc-ids.'), default=None)
    main_parser.add_argument('--filter', help=('Filter for ec2 instances as defined in http://boto3.readthedocs.org/en/latest/' 'reference/services/ec2.html#EC2.Client.describe_instances'), default=None, nargs='+')
    main_parser.add_argument('--filter-empty', help=('By default, missing values are returned as null to keep private/public ip/hostname' 'sets of equal length. This removes null values from the filter'), action='store_true', default=False)
    return cli_arg_parser.parse_args(namespace=TemplateCommand())
|
def save_positions(post_data, queryset=None):
    """Update a queryset of position objects from a POST data dict.

    :post_data: Typical post data dictionary like ``request.POST``, which
        contains the keys of the position inputs (``position-<pk>``).
    :queryset: Queryset of the model ``ObjectPosition``; defaults to all
        objects when falsy.
    """
    if not queryset:
        queryset = ObjectPosition.objects.all()
    prefix = 'position-'
    for key in post_data:
        if not key.startswith(prefix):
            continue
        try:
            pk = int(key.replace(prefix, ''))
        except ValueError:
            # Non-numeric suffix: not a position input, skip it.
            continue
        queryset.filter(pk=pk).update(position=post_data[key])
|
def delete_collection_api_service(self, **kwargs):
    """delete collection of APIService

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, pass async_req=True:

    >>> thread = api.delete_collection_api_service(async_req=True)
    >>> result = thread.get()

    Accepts the standard Kubernetes list/delete-collection options
    (pretty, _continue, field_selector, label_selector, limit,
    resource_version, timeout_seconds, watch) as keyword arguments;
    see delete_collection_api_service_with_http_info for details.

    :return: V1Status. If the method is called asynchronously, returns
        the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.delete_collection_api_service_with_http_info(**kwargs)
    return self.delete_collection_api_service_with_http_info(**kwargs)
|
def hours_estimate(self, branch='master', grouping_window=0.5, single_commit_hours=0.5, limit=None, days=None, committer=True, ignore_globs=None, include_globs=None):
    """Estimate the per-person time commitment from commit history.

    Inspired by:
    https://github.com/kimmobrunfeldt/git-hours/blob/8aaeee237cb9d9028e7a2592a25ad8468b1f45e4/index.js#L114-L143

    :param branch: the branch to return commits for
    :param grouping_window: (optional, default=0.5 hours) threshold for how
        close two commits must be to count as one coding session
    :param single_commit_hours: (optional, default=0.5 hours) time credited
        for an isolated commit
    :param limit: (optional, default=None) maximum number of commits, None
        for no limit
    :param days: (optional, default=None) number of days to return, if limit
        is None
    :param committer: (optional, default=True) use committer vs. author
    :param ignore_globs: (optional, default=None) globs to exclude
    :param include_globs: (optional, default=None) globs to include
    :return: DataFrame
    """
    max_diff_in_minutes = grouping_window * 60.0
    first_commit_addition_in_minutes = single_commit_hours * 60.0
    ch = self.commit_history(branch=branch, limit=limit, days=days, ignore_globs=ignore_globs, include_globs=include_globs)
    by = 'committer' if committer else 'author'
    rows = []
    for person in set(ch[by].values):
        person_commits = ch[ch[by] == person]
        # Index values are ns timestamps; x * 10e-10 converts them to seconds.
        stamps = [x * 10e-10 for x in sorted(person_commits.index.values.tolist())]
        if len(stamps) < 2:
            rows.append([person, 0])
            continue
        session_hours = []
        for current_ts, next_ts in zip(stamps, stamps[1:]):
            gap_minutes = (next_ts - current_ts) / 60.0
            if gap_minutes < max_diff_in_minutes:
                session_hours.append(gap_minutes / 60.0)
            else:
                session_hours.append(first_commit_addition_in_minutes / 60.0)
        rows.append([person, sum(session_hours)])
    return DataFrame(rows, columns=[by, 'hours'])
|
def python_cardinality(self, subject: str, all_are_optional: bool = False) -> str:
    """Wrap *subject* in the appropriate python typing construct.

    :param subject: Subject to be decorated (e.g. with Optional, List, ...)
    :param all_are_optional: Force everything to be optional
    :return: Typed subject
    """
    if self.multiple_elements:
        typed = f"typing.List[{subject}]"
    elif self.one_optional_element:
        if subject.startswith("typing.Optional["):
            typed = subject
        else:
            typed = f"typing.Optional[{subject}]"
    elif self.max == 0:
        typed = "type(None)"
    else:
        typed = subject
    if all_are_optional and not self.one_optional_element:
        typed = f"typing.Optional[{typed}]"
    return typed
|
def fill_pager(self, page):
    """Return newline padding so *page* fills the terminal height.

    :param page: the text about to be displayed by the pager.
    :return: a string of newlines padding the page up to (rows - 1) lines,
        or "" when the page already fills the screen.
    """
    import shutil
    # shutil.get_terminal_size() is more robust than the previous
    # os.popen("stty size") approach: it works when stdin is not a tty
    # and honours the LINES/COLUMNS environment variables.
    rows = shutil.get_terminal_size().lines - 1
    lines = len(page.splitlines())
    diff = rows - lines
    return "\n" * diff if diff > 0 else ""
|
def getOpenIDStore(filestore_path, table_prefix):
    """Return an OpenID association store object based on the database
    engine chosen for this Django application.

    * If no database engine is chosen, a filesystem-based store will
      be used whose path is filestore_path.
    * If a database engine is chosen, a store object for that database
      type will be returned.
    * If the chosen engine is not supported by the OpenID library,
      raise ImproperlyConfigured.
    * If a database store is used, this will create the tables
      necessary to use it. The table names will be prefixed with
      table_prefix. DO NOT use the same table prefix for both an
      OpenID consumer and an OpenID server in the same database.

    The result of this function should be passed to the Consumer
    constructor as the store parameter.
    """
    db_engine = settings.DATABASES['default']['ENGINE']
    if not db_engine:
        return FileOpenIDStore(filestore_path)
    # Possible side-effect: create a database connection if one isn't
    # already open.
    connection.cursor()
    # Create table names to specify for SQL-backed stores.
    tablenames = {'associations_table': table_prefix + 'openid_associations', 'nonces_table': table_prefix + 'openid_nonces', }
    types = {'django.db.backends.postgresql_psycopg2': sqlstore.PostgreSQLStore, 'django.db.backends.mysql': sqlstore.MySQLStore, 'django.db.backends.sqlite3': sqlstore.SQLiteStore, }
    if db_engine not in types:
        raise ImproperlyConfigured("Database engine %s not supported by OpenID library" % db_engine)
    s = types[db_engine](connection.connection, **tablenames)
    try:
        s.createTables()
    except (SystemExit, KeyboardInterrupt, MemoryError):
        raise
    except:
        # XXX This is not the Right Way to do this, but because the
        # underlying database implementation might differ in behavior
        # at this point, we can't reliably catch the right
        # exception(s) here. Ideally, the SQL store in the OpenID
        # library would catch exceptions that it expects and fail
        # silently, but that could be bad, too. More ideally, the SQL
        # store would not attempt to create tables it knows already
        # exists.
        pass
    return s
|
def send_json_message(address, message, **kwargs):
    """A shortcut for message sending via the dispatcher.

    Defaults ``subject_id`` to *address* when the caller supplies no truthy
    ``subject_id``; any extra keyword arguments are merged into the payload.
    """
    payload = {'message': message, }
    if not kwargs.get('subject_id'):
        payload['subject_id'] = address
    payload.update(kwargs)
    hxdispatcher.send(address, payload)
|
def min_eta_for_em_bright(bh_spin_z, ns_g_mass, mNS_pts, sBH_pts, eta_mins):
    """Interpolate the minimum symmetric mass ratio for EM-bright NS-BH binaries.

    Uses the end product of generate_em_constraint_data to sweep over a set
    of NS-BH binaries and determine the minimum symmetric mass ratio each
    binary requires to yield a remnant disk mass above threshold. Unlike
    find_em_constraint_data_point, which solves the problem at a single point
    in the parameter space, this function interpolates the tabulated results
    at the desired locations: generate_em_constraint_data must be run once
    prior to calling min_eta_for_em_bright.

    Parameters
    ----------
    bh_spin_z : array or float
        Desired BH dimensionless spin parameter values (projection along the
        orbital angular momentum).
    ns_g_mass : array or float
        Desired NS gravitational mass values (solar masses); must match
        bh_spin_z in size.
    mNS_pts : array
        NS mass grid (solar masses) from generate_em_constraint_data.
    sBH_pts : array
        BH spin grid from generate_em_constraint_data.
    eta_mins : array
        Minimum symmetric mass ratio values on the (mNS_pts, sBH_pts) grid.

    Returns
    -------
    eta_min : array
        Minimum symmetric mass ratio required by each input binary.
    """
    # Bilinear interpolation over the tabulated (mass, spin) grid.
    f = scipy.interpolate.RectBivariateSpline(mNS_pts, sBH_pts, eta_mins, kx=1, ky=1)
    if isinstance(bh_spin_z, np.ndarray):
        # BUG FIX: the previous element-by-element loop assigned a 1x1 array
        # into a scalar slot, which modern NumPy rejects; ev() evaluates the
        # spline pointwise over the whole input in one vectorized call.
        eta_min = f.ev(ns_g_mass, bh_spin_z)
    else:
        # Single-number inputs: keep the original call (returns a 2-D result).
        eta_min = f(ns_g_mass, bh_spin_z)
    return eta_min
|
def p_integerdecl_signed(self, p):
    'integerdecl : INTEGER SIGNED integernamelist SEMICOLON'
    # BUG FIX: the integer name list is p[3] (p[1] is INTEGER, p[2] is the
    # SIGNED keyword token). Iterating p[2] walked the characters of the
    # string 'signed' instead of the declared names; p.lineno(3) below
    # already pointed at the name list.
    intlist = [Integer(r,
                       Width(msb=IntConst('31', lineno=p.lineno(3)),
                             lsb=IntConst('0', lineno=p.lineno(3)),
                             lineno=p.lineno(3)),
                       signed=True,
                       lineno=p.lineno(3)) for r in p[3]]
    p[0] = Decl(tuple(intlist), lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
|
def get_projection(self, axis):
    """Return the projection of this vector onto *axis*.

    The axis does not need to be normalized.
    """
    factor = axis.dot(self) / axis.dot(axis)
    return axis * factor
|
def add(self, chassis):
    """Add a chassis to the chain and connect to it.

    :param chassis: chassis IP address.
    """
    next_index = len(self.chassis_chain) + 1
    new_chassis = IxeChassis(self.session, chassis, next_index)
    self.chassis_chain[chassis] = new_chassis
    new_chassis.connect()
|
def synchronizeReplica(self, replicaID, transportType="esriTransportTypeUrl", replicaServerGen=None, returnIdsForAdds=False, edits=None, returnAttachmentDatabyURL=False, async_=False, syncDirection="snapshot", syncLayers="perReplica", editsUploadID=None, editsUploadFormat=None, dataFormat="json", rollbackOnFailure=True):
    """TODO: implement synchronize replica
    http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#//02r300000vv00000

    NOTE: the keyword parameter was renamed from ``async`` to ``async_``
    because ``async`` became a reserved keyword in Python 3.7 and the
    original signature no longer parses. The REST parameter name sent on
    the wire ("async") is unchanged.
    """
    params = {"f": "json", "replicaID": replicaID, "transportType": transportType, "dataFormat": dataFormat, "rollbackOnFailure": rollbackOnFailure, "async": async_, "returnIdsForAdds": returnIdsForAdds, "syncDirection": syncDirection, "returnAttachmentDatabyURL": returnAttachmentDatabyURL}
    # Not yet implemented: the request is never issued.
    return
|
def render(raw_config, environment=None):
    """Render a config, using it as a template with the environment.

    Args:
        raw_config (str): the raw stacker configuration string.
        environment (dict, optional): any environment values that should be
            passed to the config.

    Returns:
        str: the stacker configuration populated with any values passed from
            the environment.

    Raises:
        exceptions.MissingEnvironment: if the template references a key
            missing from *environment*.
    """
    t = Template(raw_config)
    if not environment:
        environment = {}
    try:
        substituted = t.substitute(environment)
    except KeyError as e:
        raise exceptions.MissingEnvironment(e.args[0])
    except ValueError:
        # Support "invalid" placeholders for lookup placeholders.
        substituted = t.safe_substitute(environment)
    # NOTE: the Python 2 bytes-decoding branch and StringIO round-trip that
    # used to live here were dead code on Python 3 (Template always returns
    # str, and str has no .decode), so they have been removed.
    return substituted
|
def get(self, request, bot_id, id, format=None):
    """Get state by id.

    Thin delegation to the parent view's ``get``; the structured fields
    below feed the API doc generator.

    serializer: StateSerializer
    responseMessages:
        - code: 401
          message: Not authenticated
    """
    return super(StateDetail, self).get(request, bot_id, id, format)
|
def save_hdf5(X, y, path):
    """Save data as a HDF5 file.

    Args:
        X (numpy or scipy sparse matrix): Data matrix.
        y (numpy array): Target vector.
        path (str): Path to the HDF5 file to save data.
    """
    with h5py.File(path, 'w') as f:
        if sparse.issparse(X):
            f['issparse'] = 1
            f['target'] = y
            # Store the matrix in CSR component form.
            csr = X if sparse.isspmatrix_csr(X) else X.tocsr()
            f['shape'] = np.array(csr.shape)
            f['data'] = csr.data
            f['indices'] = csr.indices
            f['indptr'] = csr.indptr
        else:
            f['issparse'] = 0
            f['target'] = y
            f['data'] = X
|
def density_separation(X, labels, cluster_id1, cluster_id2, internal_nodes1, internal_nodes2, core_distances1, core_distances2, metric='euclidean', **kwd_args):
    """Compute the density separation between two clusters.

    This is the minimum all-points mutual reachability distance between
    pairs of points, one from the internal nodes of the MST of each cluster.

    Parameters
    ----------
    X : array (n_samples, n_features) or (n_samples, n_samples)
        The clustering input data, or the pairwise distance matrix when
        metric is `precomputed`.
    labels : array (n_samples)
        Integral cluster label per point, with -1 for noise points.
    cluster_id1, cluster_id2 : integer
        The two cluster labels to compute separation between.
    internal_nodes1, internal_nodes2 : array
        The internal MST vertices of each cluster.
    core_distances1, core_distances2 : array
        The all-points-core_distances of all points in each cluster.
    metric : string
        Metric for distance computation; `precomputed` means X is the
        precomputed distance matrix.
    **kwd_args :
        Extra arguments for the distance computation (minkowski,
        Mahanalobis, etc.).

    Returns
    -------
    The 'density separation' between the two clusters.

    References
    ----------
    Moulavi, D., Jaskowiak, P.A., Campello, R.J., Zimek, A. and Sander, J.,
    2014. Density-Based Clustering Validation. In SDM (pp. 839-847).
    """
    if metric == 'precomputed':
        between_clusters = X[labels == cluster_id1, :][:, labels == cluster_id2]
        pairwise = between_clusters[internal_nodes1, :][:, internal_nodes2]
    else:
        points1 = X[labels == cluster_id1][internal_nodes1]
        points2 = X[labels == cluster_id2][internal_nodes2]
        pairwise = cdist(points1, points2, metric, **kwd_args)
    cores1 = np.tile(core_distances1[internal_nodes1], (pairwise.shape[1], 1)).T
    cores2 = np.tile(core_distances2[internal_nodes2], (pairwise.shape[0], 1))
    # Mutual reachability: elementwise max of distance and both core distances.
    mutual_reachability = np.dstack([pairwise, cores1, cores2]).max(axis=-1)
    return mutual_reachability.min()
|
def run(cls, return_results=False):
    """Run every attached Field's rules and collect the outcome.

    Iterates through all associated Fields, applies all attached Rules, and
    stores the collated per-field results on ``cls.result``.

    Keyword arguments:
    return_results bool -- when True, return the collated per-field results
        list instead of the overall True (all passed) / False outcome.
    """
    cls.result = []
    passed = True
    for field in cls.fields:
        ok, errors = field.run()
        entry = {'field': field.name, 'value': field.value, 'passed': ok, 'errors': None}
        if errors:
            passed = False
            entry['errors'] = errors
        cls.result.append(entry)
    return cls.result if return_results else passed
|
def sudo(orig):  # pragma: no cover
    """A nicer version of sudo that uses getpass to ask for a password, or
    allows the first keyword argument to be a string password."""
    prompt = "[sudo] password for %s: " % getpass.getuser()

    def ask_password():
        # Lazily prompt the user only when sudo actually reads stdin.
        yield getpass.getpass(prompt=prompt) + "\n"

    def preprocess(args, kwargs):
        password = kwargs.pop("password", None)
        if password is None:
            kwargs["_in"] = ask_password()
        else:
            kwargs["_in"] = password.rstrip("\n") + "\n"
        return args, kwargs

    return orig.bake("-S", _arg_preprocess=preprocess)
|
def get_samp_con():
    """get sample naming convention

    Interactively prompt until a valid convention (1-7) is chosen;
    options 4 and 7 must be given as "4-Z"/"7-Z" where Z is an integer.
    Returns the convention string and Z (1 for the default option).
    """
    samp_con, Z = "", ""
    while not samp_con:
        samp_con = input("""
      Sample naming convention:
    [1] XXXXY: where XXXX is an arbitrary length site designation and Y
    is the single character sample designation. e.g., TG001a is the
    first sample from site TG001. [default]
    [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
    [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
    [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
    [5] site name same as sample
    [6] site is entered under a separate column
    [7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
    NB: all others you will have to customize your self
    or e-mail ltauxe@ucsd.edu for help.
    select one:
""")
        if samp_con in ("", "1"):
            # Empty answer selects the default convention.
            samp_con, Z = "1", 1
        # Options 4 and 7 carry a character count: "4-Z" / "7-Z".
        for option in ("4", "7"):
            if option in samp_con:
                if "-" not in samp_con:
                    print("option [%s] must be in form %s-Z where Z is an integer" % (option, option))
                    samp_con = ""
                else:
                    Z = samp_con.split("-")[1]
                    samp_con = option
        if not samp_con.isdigit() or int(samp_con) > 7:
            print("Try again\n ")
            samp_con = ""
    return samp_con, Z
|
def __interact_copy(self, escape_character=None, input_filter=None, output_filter=None):
    '''This is used by the interact() method.

    Shuttles bytes between our stdin/stdout and the child process until
    the child dies, an EOF is detected, or *escape_character* appears on
    stdin.  *output_filter*/*input_filter*, when given, may transform the
    data flowing in each direction before it is forwarded.
    '''
    while self.isalive():
        # Block until the child or our own stdin has data available.
        if self.use_poll:
            r = poll_ignore_interrupts([self.child_fd, self.STDIN_FILENO])
        else:
            r, w, e = select_ignore_interrupts([self.child_fd, self.STDIN_FILENO], [], [])
        if self.child_fd in r:
            try:
                data = self.__interact_read(self.child_fd)
            except OSError as err:
                if err.args[0] == errno.EIO:
                    # Linux-style EOF: reading a closed pty raises EIO.
                    break
                raise
            if data == b'':
                # BSD-style EOF: read returns an empty byte string.
                break
            if output_filter:
                data = output_filter(data)
            self._log(data, 'read')
            # Echo the child's output to our stdout.
            os.write(self.STDOUT_FILENO, data)
        if self.STDIN_FILENO in r:
            data = self.__interact_read(self.STDIN_FILENO)
            if input_filter:
                data = input_filter(data)
            i = -1
            if escape_character is not None:
                i = data.rfind(escape_character)
            if i != -1:
                # Escape character seen: forward everything before it to
                # the child, then stop interacting.
                data = data[:i]
                if data:
                    self._log(data, 'send')
                self.__interact_writen(self.child_fd, data)
                break
            self._log(data, 'send')
            self.__interact_writen(self.child_fd, data)
|
def get_mainmarket_ip(ip, port):
    """Resolve the stock-market server address to use.

    When both *ip* and *port* are None, fall back to the module-level
    ``best_ip`` cache (calling ``select_best_ip()`` first if nothing has
    been cached yet); otherwise the arguments are returned unchanged.

    Arguments:
        ip {str or None} -- explicit server address, if any
        port {int or None} -- explicit server port, if any

    Returns:
        tuple -- (ip, port) to connect to
    """
    global best_ip
    if ip is None and port is None:
        stock = best_ip['stock']
        if stock['ip'] is None and stock['port'] is None:
            # Nothing cached yet: probe for the best server and cache it.
            best_ip = select_best_ip()
            ip = best_ip['stock']['ip']
            port = best_ip['stock']['port']
        elif stock['ip'] is not None and stock['port'] is not None:
            ip = stock['ip']
            port = stock['port']
        # A half-populated cache falls through unchanged, as before.
    return ip, port
|
def gev_expval(xi, mu=0, sigma=1):
    """Expected value of generalized extreme value distribution."""
    # E[X] = mu + (sigma/xi) * (Gamma(1 - xi) - 1), written with the
    # common factor hoisted.
    scale = sigma / xi
    return mu - scale + scale * flib.gamfun(1 - xi)
|
def ddot(L, R, left=None, out=None):
    r"""Dot product of a matrix and a diagonal one.

    The diagonal matrix is represented by its one-dimensional diagonal.

    Args:
        L (array_like): Left matrix.
        R (array_like): Right matrix.
        left (bool, optional): whether the diagonal operand is ``L``;
            inferred from the dimensions when None.
        out (:class:`numpy.ndarray`, optional): copy result to.

    Returns:
        :class:`numpy.ndarray`: Resulting matrix.
    """
    L = asarray(L, float)
    R = asarray(R, float)
    if left is None:
        # Exactly one operand must be 1-d (the diagonal), the other 2-d.
        if sorted((L.ndim, R.ndim)) != [1, 2]:
            raise ValueError(
                "Wrong array layout. One array should have"
                " ndim=1 and the other one ndim=2."
            )
        left = L.ndim == 1
    if left:
        target = copy(R) if out is None else out
        # Reshape the diagonal to broadcast over R's trailing axes.
        diag = L.reshape(list(L.shape) + [1] * (R.ndim - 1))
        return multiply(diag, R, out=target)
    target = copy(L) if out is None else out
    return multiply(L, R, out=target)
|
def molecule(lines):
    """Parse molfile part into molecule object

    Args:
        lines (list): lines of molfile part

    Raises:
        ValueError: Symbol not defined in periodictable.yaml
            (Polymer expression not supported yet)
    """
    # Line 4 is the counts line: atom and bond totals in fixed columns.
    counts = lines[3]
    num_atoms = int(counts[0:3])
    num_bonds = int(counts[3:6])
    # The chiral flag (counts[12:15]) is unused and the property count
    # (counts[30:33]) is "no longer supported" by the format.
    atom_block = lines[4:num_atoms + 4]
    bond_block = lines[num_atoms + 4:num_atoms + num_bonds + 4]
    compound = Compound()
    compound.graph._node = atoms(atom_block)
    compound.graph._adj = bonds(bond_block, compound.graph._node.keys())
    # Everything after the bond block is the properties block.
    add_properties(properties(lines[num_atoms + num_bonds + 4:]), compound)
    return compound
|
def get(self, names_to_get, convert_to_numpy=True):
    """Loads the requested variables from the matlab com client.

    names_to_get can be either a variable name or a list of variable names.
    If it is a variable name, the value is returned.
    If it is a list, a dictionary of variable_name -> value is returned.
    If convert_to_numpy is true, the method will convert all array values
    to numpy arrays. Scalars are left as regular python objects.
    """
    self._check_open()
    # Accept a single name given as a string (str, plus unicode when
    # running on Python 2 -- referencing `unicode` directly would
    # NameError on Python 3).
    try:
        string_types = (str, unicode)  # noqa: F821
    except NameError:
        string_types = (str,)
    single_item = isinstance(names_to_get, string_types)
    if single_item:
        names_to_get = [names_to_get]
    ret = {}
    for name in names_to_get:
        value = self.client.GetWorkspaceData(name, 'base')
        # TODO(daniv): Do we really want to reduce dimensions like that?
        # What if this is a row vector?
        while isinstance(value, (tuple, list)) and len(value) == 1:
            value = value[0]
        if convert_to_numpy and isinstance(value, (tuple, list)):
            value = np.array(value)
        ret[name] = value
    if single_item:
        # dict views are not indexable on Python 3 (ret.values()[0]
        # raised TypeError); take the sole value instead.
        return next(iter(ret.values()))
    return ret
|
def main():
    """This is a Toil pipeline to transfer TCGA data into an S3 Bucket

    Data is pulled down with Genetorrent and transferred to S3 via S3AM.
    """
    # Build the argument parser and let Toil add its own options.
    parser = build_parser()
    Job.Runner.addToilOptions(parser)
    args = parser.parse_args()
    # Store inputs from argparse
    inputs = {
        'genetorrent': args.genetorrent,
        'genetorrent_key': args.genetorrent_key,
        'ssec': args.ssec,
        's3_dir': args.s3_dir,
    }
    # Sanity checks: every supplied path must point at an existing file.
    for path in (args.ssec, args.genetorrent, args.genetorrent_key):
        if path:
            assert os.path.isfile(path)
    samples = parse_genetorrent(args.genetorrent)
    # Start pipeline.
    # map_job accepts a function, an iterable, and *args. The function is
    # launched as a child process with one element from the iterable and
    # *args, which in turn spawns a tree of child jobs.
    Job.Runner.startToil(Job.wrapJobFn(map_job, download_and_transfer_sample, samples, inputs), args)
|
def send(self, to, amount, from_address=None, fee=None):
    """Send bitcoin from your wallet to a single address.

    :param str to: recipient bitcoin address
    :param int amount: amount to send (in satoshi)
    :param str from_address: specific address to send from (optional)
    :param int fee: transaction fee in satoshi. Must be greater than the
        default fee (optional).
    :return: an instance of :class:`PaymentResponse` class
    """
    # A single-recipient payment is just a one-entry batch payment.
    return self.send_many({to: amount}, from_address, fee)
|
def _series_col_letter ( self , series ) :
"""The letter of the Excel worksheet column in which the data for a
series appears ."""
|
column_number = 1 + series . categories . depth + series . index
return self . _column_reference ( column_number )
|
def _apply_concretization_strategies(self, addr, strategies, action):
    """Applies concretization strategies on the address until one of them succeeds.

    :param addr:       symbolic address expression to concretize
    :param strategies: ordered iterable of concretization strategies to try
    :param action:     description of the memory action, used in breakpoint
                       arguments and the error message
    :return:           the result of the first strategy that produced one
    :raises SimMemoryAddressError: if no strategy produced a result
    """
    # we try all the strategies in order
    for s in strategies:
        # first, we trigger the SimInspect breakpoint and give it a chance to intervene
        e = addr
        self.state._inspect('address_concretization', BP_BEFORE, address_concretization_strategy=s, address_concretization_action=action, address_concretization_memory=self, address_concretization_expr=e, address_concretization_add_constraints=True)
        # the breakpoint may have swapped out the strategy or the expression
        s = self.state._inspect_getattr('address_concretization_strategy', s)
        e = self.state._inspect_getattr('address_concretization_expr', addr)
        # if the breakpoint None'd out the strategy, we skip it
        if s is None:
            continue
        # let's try to apply it!
        try:
            a = s.concretize(self, e)
        except SimUnsatError:
            # unsatisfiable under this strategy; fall through to the next one
            a = None
        # trigger the AFTER breakpoint and give it a chance to intervene
        self.state._inspect('address_concretization', BP_AFTER, address_concretization_result=a)
        a = self.state._inspect_getattr('address_concretization_result', a)
        # return the result if not None!
        if a is not None:
            return a
    # well, we tried
    raise SimMemoryAddressError("Unable to concretize address for %s with the provided strategies." % action)
|
def _pickle_to_temp_location_or_memory(obj):
    '''If obj can be serialized directly into memory (via cloudpickle) this
    will return the serialized bytes.
    Otherwise, gl_pickle is attempted and it will then
    generates a temporary directory serializes an object into it, returning
    the directory name. This directory will not have lifespan greater than
    that of unity_server.
    '''
    from . import _cloudpickle as cloudpickle
    try:
        # try cloudpickle first and see if that works
        return cloudpickle.dumps(obj)
    except Exception:
        # Pickling arbitrary objects can fail in many ways; fall back to
        # gl_pickle.  (Narrowed from a bare ``except:``, which would also
        # swallow KeyboardInterrupt/SystemExit.)
        pass
    # nope. that does not work! lets try again with gl pickle
    filename = _make_temp_filename('pickle')
    from .. import _gl_pickle
    pickler = _gl_pickle.GLPickler(filename)
    pickler.dump(obj)
    pickler.close()
    return filename
|
def ffmpeg_version():
    """Returns the available ffmpeg version

    Returns
        version : str
            version number as string (None when the banner line does not
            match the expected pattern)
    """
    output = sp.check_output(['ffmpeg', '-version'])
    # Keep only the banner line that carries the version string.
    banner_lines = [line for line in output.splitlines() if "ffmpeg version " in str(line)]
    hay = banner_lines[0].decode('ascii')
    match = re.findall(r'ffmpeg version (\d+\.)?(\d+\.)?(\*|\d+)', hay)
    if not match:
        return None
    return "".join(match[0])
|
def pyprf(strCsvCnfg, lgcTest=False, varRat=None, strPathHrf=None):
    """Main function for pRF mapping.

    Loads the analysis configuration, builds (or loads) pRF model time
    courses, fits them to the functional data in parallel worker
    processes, and exports the best-fitting parameter maps as nii files.

    Parameters
    ----------
    strCsvCnfg : str
        Absolute file path of config file.
    lgcTest : Boolean
        Whether this is a test (pytest). If yes, absolute path of pyprf
        library will be prepended to config file paths.
    varRat : float, default None
        Ratio of size suppressive surround to size of center pRF.
    strPathHrf : str or None
        Path to npy file with custom hrf parameters. If None, default
        parameters will be used.
    """
    # *** Check time
    print('---pRF analysis')
    varTme01 = time.time()

    # *** Preparations
    # Load config parameters from csv file into dictionary:
    dicCnfg = load_config(strCsvCnfg, lgcTest=lgcTest)
    # Load config parameters from dictionary into namespace:
    cfg = cls_set_config(dicCnfg)
    # Conditional imports (GPU backend pulls in tensorflow, so it is only
    # imported when actually requested):
    if cfg.strVersion == 'gpu':
        from pyprf_feature.analysis.find_prf_gpu import find_prf_gpu
    if ((cfg.strVersion == 'cython') or (cfg.strVersion == 'numpy')):
        from pyprf_feature.analysis.find_prf_cpu import find_prf_cpu
    # Convert preprocessing parameters (for temporal smoothing)
    # from SI units (i.e. [s]) into units of data array (volumes):
    cfg.varSdSmthTmp = np.divide(cfg.varSdSmthTmp, cfg.varTr)

    # *** Create or load pRF time course models
    # Create model time courses. Also return logical for inclusion of model
    # parameters which will be needed later when we create model parameters
    # in degree.
    aryPrfTc, lgcMdlInc = model_creation(dicCnfg, varRat=varRat, strPathHrf=strPathHrf)
    # Deduce the number of features from the pRF time course models array
    cfg.varNumFtr = aryPrfTc.shape[1]

    # *** Preprocessing
    # The model time courses will be preprocessed such that they are smoothed
    # (temporally) with same factor as the data and that they will be z-scored:
    aryPrfTc = prep_models(aryPrfTc, varSdSmthTmp=cfg.varSdSmthTmp)
    # The functional data will be masked and demeaned:
    aryLgcMsk, aryLgcVar, hdrMsk, aryAff, aryFunc, tplNiiShp = prep_func(cfg.strPathNiiMask, cfg.lstPathNiiFunc, varAvgThr=-100)
    # set the precision of the header to np.float32 so that the prf results
    # will be saved in this precision later
    hdrMsk.set_data_dtype(np.float32)

    # *** Checks
    # Make sure that if gpu fitting is used, the number of cross-validations is
    # set to 1, not higher
    if cfg.strVersion == 'gpu':
        strErrMsg = 'Stopping program. ' + 'Cross-validation on GPU is currently not supported. ' + 'Set varNumXval equal to 1 in csv file in order to continue. '
        assert cfg.varNumXval == 1, strErrMsg
    # For the GPU version, we need to set down the parallelisation to 1 now,
    # because no separate CPU threads are to be created. We may still use CPU
    # parallelisation for preprocessing, which is why the parallelisation
    # factor is only reduced now, not earlier.
    if cfg.strVersion == 'gpu':
        cfg.varPar = 1
    # Make sure that if cython is used, the number of features is 1 or 2,
    # not higher
    if cfg.strVersion == 'cython':
        strErrMsg = 'Stopping program. ' + 'Cython is not supported for more features than 1. ' + 'Set strVersion equal \'numpy\'.'
        assert cfg.varNumFtr in [1, 2], strErrMsg
    # Check whether we need to crossvalidate
    if np.greater(cfg.varNumXval, 1):
        cfg.lgcXval = True
    elif np.equal(cfg.varNumXval, 1):
        cfg.lgcXval = False
    strErrMsg = 'Stopping program. ' + 'Set numXval (number of crossvalidation folds) to 1 or higher'
    assert np.greater_equal(cfg.varNumXval, 1), strErrMsg

    # *** Find pRF models for voxel time courses
    print('------Find pRF models for voxel time courses')
    # Number of voxels for which pRF finding will be performed:
    cfg.varNumVoxInc = aryFunc.shape[0]
    print('---------Number of voxels on which pRF finding will be performed: ' + str(cfg.varNumVoxInc))
    print('---------Number of features pRF finding will be performed with: ' + str(cfg.varNumFtr))
    print('---------Preparing parallel pRF model finding')
    # Get array with all possible model parameter combination:
    # [x positions, y positions, sigmas]
    aryMdlParams = crt_mdl_prms((int(cfg.varVslSpcSzeX), int(cfg.varVslSpcSzeY)), cfg.varNum1, cfg.varExtXmin, cfg.varExtXmax, cfg.varNum2, cfg.varExtYmin, cfg.varExtYmax, cfg.varNumPrfSizes, cfg.varPrfStdMin, cfg.varPrfStdMax, kwUnt='deg', kwCrd=cfg.strKwCrd)
    # Exclude models with prf center outside stimulated area
    aryMdlParams = aryMdlParams[lgcMdlInc, :]
    # Empty list for results (parameters of best fitting pRF model):
    lstPrfRes = [None] * cfg.varPar
    # Empty list for processes:
    lstPrcs = [None] * cfg.varPar
    # Create a queue to put the results in:
    queOut = mp.Queue()
    # Create list with chunks of functional data for the parallel processes:
    lstFunc = np.array_split(aryFunc, cfg.varPar)
    # We don't need the original array with the functional data anymore:
    del(aryFunc)
    # Prepare dictionary to pass as kwargs to find_prf_cpu
    dctKw = {'lgcRstr': None, 'lgcPrint': True}
    # CPU version (using numpy or cython for pRF finding):
    if ((cfg.strVersion == 'numpy') or (cfg.strVersion == 'cython')):
        print('---------pRF finding on CPU')
        print('---------Creating parallel processes')
        # Create processes:
        for idxPrc in range(0, cfg.varPar):
            lstPrcs[idxPrc] = mp.Process(target=find_prf_cpu, args=(idxPrc, lstFunc[idxPrc], aryPrfTc, aryMdlParams, cfg.strVersion, cfg.lgcXval, cfg.varNumXval, queOut), kwargs=dctKw,)
            # Daemon (kills processes when exiting):
            lstPrcs[idxPrc].Daemon = True
    # GPU version (using tensorflow for pRF finding):
    elif cfg.strVersion == 'gpu':
        print('---------pRF finding on GPU')
        # Create processes:
        for idxPrc in range(0, cfg.varPar):
            lstPrcs[idxPrc] = mp.Process(target=find_prf_gpu, args=(idxPrc, aryMdlParams, lstFunc[idxPrc], aryPrfTc, queOut), kwargs=dctKw,)
            # Daemon (kills processes when exiting):
            lstPrcs[idxPrc].Daemon = True
    # Start processes:
    for idxPrc in range(0, cfg.varPar):
        lstPrcs[idxPrc].start()
    # Delete reference to list with function data (the data continues to exists
    # in child process):
    del(lstFunc)
    # Collect results from queue (this blocks until each worker has put its
    # result, so it must happen before join() to avoid deadlocking on a
    # full queue):
    for idxPrc in range(0, cfg.varPar):
        lstPrfRes[idxPrc] = queOut.get(True)
    # Join processes:
    for idxPrc in range(0, cfg.varPar):
        lstPrcs[idxPrc].join()

    # *** Prepare pRF finding results for export
    print('---------Prepare pRF finding results for export')
    # Put output into correct order (each worker's result sorts by its
    # process index):
    lstPrfRes = sorted(lstPrfRes)
    # collect results from parallelization
    aryBstXpos = joinRes(lstPrfRes, cfg.varPar, 1, inFormat='1D')
    aryBstYpos = joinRes(lstPrfRes, cfg.varPar, 2, inFormat='1D')
    aryBstSd = joinRes(lstPrfRes, cfg.varPar, 3, inFormat='1D')
    aryBstR2 = joinRes(lstPrfRes, cfg.varPar, 4, inFormat='1D')
    aryBstBts = joinRes(lstPrfRes, cfg.varPar, 5, inFormat='2D')
    if np.greater(cfg.varNumXval, 1):
        aryBstR2Single = joinRes(lstPrfRes, cfg.varPar, 6, inFormat='2D')
    # Delete unneeded large objects:
    del(lstPrfRes)
    # Calculate polar angle map:
    aryPlrAng = np.arctan2(aryBstYpos, aryBstXpos)
    # Calculate eccentricity map (r = sqrt(x^2 + y^2)):
    aryEcc = np.sqrt(np.add(np.square(aryBstXpos), np.square(aryBstYpos)))
    # Export each map of best parameters as a 3D nii file
    print('---------Exporting results')
    # Append 'hrf' to cfg.strPathOut, if fitting was done with custom hrf
    if strPathHrf is not None:
        cfg.strPathOut = cfg.strPathOut + '_hrf'
    # Concatenate all the best voxel maps
    aryBstMaps = np.stack([aryBstXpos, aryBstYpos, aryBstSd, aryBstR2, aryPlrAng, aryEcc], axis=1)
    # List with name suffices of output images:
    lstNiiNames = ['_x_pos', '_y_pos', '_SD', '_R2', '_polar_angle', '_eccentricity']
    # Append ratio to nii file name, if fitting was done with sup surround
    if varRat is not None:
        lstNiiNames = [strNii + '_' + str(varRat) for strNii in lstNiiNames]
    # Create full path names from nii file names and output path
    lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in lstNiiNames]
    # export map results as seperate 3D nii files
    export_nii(aryBstMaps, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp, aryAff, hdrMsk, outFormat='3D')
    # Save beta parameter estimates for every feature:
    # List with name suffices of output images:
    lstNiiNames = ['_Betas']
    # Append ratio to nii file name, if fitting was done with sup surround
    if varRat is not None:
        lstNiiNames = [strNii + '_' + str(varRat) for strNii in lstNiiNames]
    # Create full path names from nii file names and output path
    lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in lstNiiNames]
    # export beta parameter as a single 4D nii file
    export_nii(aryBstBts, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp, aryAff, hdrMsk, outFormat='4D')
    # Save R2 maps from crossvalidation (saved for every run) as nii:
    if np.greater(cfg.varNumXval, 1):
        # truncate extremely negative R2 values
        aryBstR2Single[np.where(np.less_equal(aryBstR2Single, -1.0))] = -1.0
        # List with name suffices of output images:
        lstNiiNames = ['_R2_single']
        # Append ratio to nii file name, if fitting was done with sup surround
        if varRat is not None:
            lstNiiNames = [strNii + '_' + str(varRat) for strNii in lstNiiNames]
        # Create full path names from nii file names and output path
        lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in lstNiiNames]
        # export R2 maps as a single 4D nii file
        export_nii(aryBstR2Single, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp, aryAff, hdrMsk, outFormat='4D')

    # *** Report time
    varTme02 = time.time()
    varTme03 = varTme02 - varTme01
    print('---Elapsed time: ' + str(varTme03) + ' s')
    print('---Done.')
|
def _gen_toc ( self , idx ) :
"""生成目录html
: return :
: rtype :"""
|
if not self . _chapterization :
return ''
start , end = self . _start_end_of_index ( idx )
chap_infos = '\n' . join ( [ " <li><a href=\"#ch{}\">{}</a></li>" . format ( chap . _idx , chap . _title ) for chap in self . _chapters [ start : end ] ] )
toc = """
<div id="toc">
<h2>
目录<br />
</h2>
<ul>
{}
</ul>
</div>
<div class="pagebreak"></div>
""" . format ( chap_infos )
return toc
|
def load_migration_file(self, filename):
    """Load migration file as module.

    :param filename: file name of the migration script, relative to
        ``self.directory``
    :return: the executed module object (also registered in
        ``sys.modules`` under the name "migration", matching the old
        ``imp.load_source`` behaviour)
    """
    path = os.path.join(self.directory, filename)
    # ``imp.load_source`` is deprecated since Python 3.4 and removed in
    # Python 3.12; use the importlib machinery instead.
    import importlib.util
    spec = importlib.util.spec_from_file_location("migration", path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    # imp.load_source registered the module in sys.modules; keep that
    # side effect for backward compatibility.
    sys.modules["migration"] = module
    return module
|
def get_scope(self, which, labels):
    """Return the index of the image's distribution-scope label.

    :param which: str, description of the image this belongs to
    :param labels: dict, labels on the image
    :raises NothingToCheck: when the scope label is absent or its value
        is not one of ``self.SCOPE_NAME``
    """
    try:
        scope_choice = labels[self.SCOPE_LABEL]
    except (KeyError, TypeError):
        # Label missing entirely (or labels is not a mapping at all).
        self.log.debug("no distribution scope set for %s image", which)
        raise NothingToCheck
    try:
        return self.SCOPE_NAME.index(scope_choice)
    except ValueError:
        self.log.warning("invalid label %s=%s for %s image", self.SCOPE_LABEL, scope_choice, which)
        raise NothingToCheck
|
def cli():
    """Entry point for the application script"""
    parser = get_argparser()
    args = parser.parse_args()
    check_args(args)
    if args.v:
        # A version request short-circuits rendering entirely.
        print('ERAlchemy version {}.'.format(__version__))
        exit(0)
    render_er(
        args.i,
        args.o,
        include_tables=args.include_tables,
        include_columns=args.include_columns,
        exclude_tables=args.exclude_tables,
        exclude_columns=args.exclude_columns,
        schema=args.s,
    )
|
def show_member(self, member, **_params):
    """Fetches information of a certain load balancer member."""
    path = self.member_path % (member)
    return self.get(path, params=_params)
|
def ToURN(self):
    """Converts a reference into an URN."""
    joined = "/".join(self.path_components)
    ptype = self.path_type
    # Filesystem-backed path types share the "fs/<type>" prefix.
    if ptype in [PathInfo.PathType.OS, PathInfo.PathType.TSK]:
        return rdfvalue.RDFURN(self.client_id).Add("fs").Add(ptype.name.lower()).Add(joined)
    if ptype == PathInfo.PathType.REGISTRY:
        return rdfvalue.RDFURN(self.client_id).Add("registry").Add(joined)
    if ptype == PathInfo.PathType.TEMP:
        return rdfvalue.RDFURN(self.client_id).Add("temp").Add(joined)
    raise ValueError("Unsupported path type: %s" % ptype)
|
def get_table_description(self, cursor, table_name, identity_check=True):
    """Returns a description of the table, with DB-API cursor.description interface.

    The 'identity_check' parameter has been added to the function argspec.
    If set to True, the function will check each of the table's fields for the
    IDENTITY property (the IDENTITY property is the MSSQL equivalent to an AutoField).
    When a field is found with an IDENTITY property, it is given a custom field number
    of SQL_AUTOFIELD, which maps to the 'AutoField' value in the DATA_TYPES_REVERSE dict.
    """
    items = []
    # Map pyodbc's cursor.columns rows onto the DB-API description tuple
    # layout: (name, type_code, display_size, internal_size, precision,
    # scale, null_ok).
    for c in cursor.columns(table=table_name):
        column = [c[3], c[4], None, c[6], c[6], c[8], c[10]]
        if identity_check and self._is_auto_field(cursor, table_name, column[0]):
            column[1] = SQL_AUTOFIELD
        # The conversion from TextField to CharField below is unwise.
        # A SQLServer db field of type "Text" is not interchangeable with a
        # CharField, no matter how short its max_length.  For example,
        # model.objects.values(<text_field_name>).count() will fail on a
        # sqlserver 'text' field.
        if column[1] == Database.SQL_WVARCHAR and column[3] < 4000:
            column[1] = Database.SQL_WCHAR
        items.append(column)
    return items
|
def collect_members(module_to_name):
    """Collect all symbols from a list of modules.

    Args:
        module_to_name: Dictionary mapping modules to short names.

    Returns:
        Dictionary mapping name to (fullname, member) pairs.
    """
    members = {}
    for module, module_name in module_to_name.items():
        all_names = getattr(module, "__all__", None)
        for name, member in inspect.getmembers(module):
            # Only documentable functions/classes that are not explicitly
            # dropped and (when __all__ exists) are exported.
            if not (inspect.isfunction(member) or inspect.isclass(member)):
                continue
            if _always_drop_symbol_re.match(name):
                continue
            if all_names is not None and name not in all_names:
                continue
            fullname = '%s.%s' % (module_name, name)
            if name in members:
                other_fullname, other_member = members[name]
                if member is not other_member:
                    raise RuntimeError("Short name collision between %s and %s" % (fullname, other_fullname))
                if len(fullname) == len(other_fullname):
                    raise RuntimeError("Can't decide whether to use %s or %s for %s: " "both full names have length %d" % (fullname, other_fullname, name, len(fullname)))
                if len(fullname) > len(other_fullname):
                    continue
            # Use the shorter full name
            members[name] = fullname, member
    return members
|
def transform(self, y, exogenous=None, **_):
    """Transform the new array

    Apply the Box-Cox transformation to the array after learning the
    lambda parameter.

    Parameters
    ----------
    y : array-like or None, shape=(n_samples,)
        The endogenous (time-series) array.
    exogenous : array-like or None, shape=(n_samples, n_features), optional
        The exogenous array of additional covariates. Not used for
        endogenous transformers. Default is None, and non-None values will
        serve as pass-through arrays.

    Returns
    -------
    y_transform : array-like or None
        The Box-Cox transformed y array

    exogenous : array-like or None
        The exog array
    """
    check_is_fitted(self, "lam1_")
    lam1 = self.lam1_
    lam2 = self.lam2_
    y, exog = self._check_y_exog(y, exogenous)
    # Shift by the second (location) lambda before applying the power
    # transform.
    y += lam2
    neg_mask = y <= 0.
    if neg_mask.any():
        action = self.neg_action
        msg = "Negative or zero values present in y"
        if action == "raise":
            raise ValueError(msg)
        elif action == "warn":
            warnings.warn(msg, UserWarning)
        # Clamp non-positive values to the configured floor so the
        # log/power below is defined (reached for "warn" and any
        # non-"raise" action).
        y[neg_mask] = self.floor
    if lam1 == 0:
        # lambda1 == 0 is the log-transform limit of the Box-Cox family.
        return np.log(y), exog
    return (y ** lam1 - 1) / lam1, exog
|
def _copyValues(self, to, extra=None):
    """Copies param values from this instance to another instance for
    params shared by them.

    :param to: the target instance
    :param extra: extra params to be copied
    :return: the target instance with param values copied
    """
    explicit = self._paramMap.copy()
    if extra is not None:
        explicit.update(extra)
    for param in self.params:
        shared = to.hasParam(param.name)
        # copy default params
        if param in self._defaultParamMap and shared:
            to._defaultParamMap[to.getParam(param.name)] = self._defaultParamMap[param]
        # copy explicitly set params
        if param in explicit and shared:
            to._set(**{param.name: explicit[param]})
    return to
|
def remove(self, key, column_path, timestamp, consistency_level):
    """Remove data from the row specified by key at the granularity specified by column_path, and the given timestamp. Note
    that all the values in column_path besides column_path.column_family are truly optional: you can remove the entire
    row by just specifying the ColumnFamily, or you can remove a SuperColumn or a single Column by specifying those levels too.

    Parameters:
     - key
     - column_path
     - timestamp
     - consistency_level

    Returns a Deferred that fires when the matching response arrives.
    """
    # Register a Deferred under a fresh sequence id before sending, so
    # the response handler can find it.
    self._seqid += 1
    d = defer.Deferred()
    self._reqs[self._seqid] = d
    self.send_remove(key, column_path, timestamp, consistency_level)
    return d
|
def join_channel(self, channel):
    """Join a different chat channel on Twitch.

    Note, this function returns immediately, but the switch might
    take a moment

    :param channel: name of the channel (without #)
    """
    command = 'JOIN #%s\r\n' % channel
    self.s.send(command.encode('utf-8'))
    if self.verbose:
        print(command)
|
def prune_directory(self):
    """Delete any objects that can be loaded and are expired according to
    the current lifetime setting.

    A file will be deleted if the following conditions are met:

    - The file extension matches :py:meth:`bucketcache.backends.Backend.file_extension`
    - The object can be loaded by the configured backend.
    - The object's expiration date has passed.

    Returns:
        File size and number of files deleted.
    :rtype: :py:class:`~bucketcache.utilities.PrunedFilesInfo`

    .. note::

        For any buckets that share directories, ``prune_directory`` will
        affect files saved with both, if they use the same backend class.

        This is not destructive, because only files that have expired
        according to the lifetime of the original bucket are deleted.
    """
    glob = '*.{ext}'.format(ext=self.backend.file_extension)
    totalsize = 0
    totalnum = 0
    for f in self._path.glob(glob):
        filesize = f.stat().st_size
        key_hash = f.stem
        # Remember whether the object was already in the in-memory cache
        # *before* the load attempt below.
        in_cache = key_hash in self._cache
        try:
            self._get_obj_from_hash(key_hash)
        except KeyExpirationError:
            # File has been deleted by `_get_obj_from_hash`
            totalsize += filesize
            totalnum += 1
        except KeyInvalidError:
            # Unreadable / not one of ours: leave the file alone.
            pass
        except Exception:
            raise
        else:
            if not in_cache:
                # The successful load presumably inserted the object into
                # the in-memory cache as a side effect; evict it again so
                # pruning does not grow memory use.
                del self._cache[key_hash]
    return PrunedFilesInfo(size=totalsize, num=totalnum)
|
def _get(self, locator, expected_condition, params=None, timeout=None, error_msg="", driver=None, **kwargs):
    """Get elements based on locator with optional parameters.

    Uses selenium.webdriver.support.expected_conditions to determine the
    state of the element(s).

    :param locator: element identifier
    :param expected_condition: expected condition of element (ie. visible, clickable, etc)
    :param params: (optional) locator parameters
    :param timeout: (optional) time to wait for element (default: self._explicit_wait)
    :param error_msg: (optional) customized error message
    :param driver: (optional) alternate Webdriver instance (example: parent-element)
    :param kwargs: optional arguments to expected conditions
    :return: WebElement instance, list of WebElements, or None
    """
    from selenium.webdriver.support.ui import WebDriverWait

    if not isinstance(locator, WebElement):
        error_msg += "\nLocator of type <{}> with selector <{}> with params <{params}>".format(*locator, params=params)
        locator = self.__class__.get_compliant_locator(*locator, params=params)
    search_driver = driver or self.driver
    condition = expected_condition(locator, **kwargs)
    if timeout == 0:
        # Zero timeout: evaluate the condition once, without waiting.
        try:
            return condition(search_driver)
        except NoSuchElementException:
            return None
    if timeout is None:
        timeout = self._explicit_wait
    error_msg += "\nExpected condition: {}" "\nTimeout: {}".format(expected_condition, timeout)
    return WebDriverWait(search_driver, timeout).until(condition, error_msg)
|
def create_from_pointer(cls, pointer):
    """Build an instance directly from a Pointer, bypassing __init__.

    :type pointer: Pointer
    """
    instance = cls.__new__(cls)
    instance.pointer = pointer
    label = LabelMonetaryAccount()
    label._iban = pointer.value
    label._display_name = pointer.name
    instance.label_monetary_account = label
    return instance
|
def close(self):
    """Close the client.

    Tears down the message handler, the keep-alive thread, the Session and
    CBS authentication layer, and finally the Connection. A Connection that
    was supplied externally is left intact. The client cannot be re-opened
    afterwards; pending unsent messages are kept so they can be inspected
    and queued to a new client.
    """
    if self.message_handler:
        self.message_handler.destroy()
        self.message_handler = None
    self._shutdown = True
    if self._keep_alive_thread:
        self._keep_alive_thread.join()
        self._keep_alive_thread = None
    if not self._session:
        # already closed
        return
    if self._connection.cbs:
        _logger.debug("CBS session pending.")
    else:
        _logger.debug("Closing non-CBS session.")
        self._session.destroy()
    self._session = None
    if self._ext_connection:
        _logger.debug("Shared connection remaining open.")
    else:
        _logger.debug("Closing exclusive connection.")
        self._connection.destroy()
    self._connection = None
|
def load_func(serializer_type):
    """A decorator factory for ``_load(loader_func=..., serializer_type=..., **kwargs)``.

    The decorated loader function is forwarded to ``_load`` together with the
    fixed ``serializer_type``; positional and keyword arguments pass through
    unchanged.

    :param serializer_type: serializer identifier forwarded to ``_load``
    :return: decorator wrapping a loader function
    """
    import functools

    def outer_wrapper(loader_func):
        # functools.wraps preserves the wrapped loader's name/docstring,
        # which the original version lost (every wrapper was named 'wrapper').
        @functools.wraps(loader_func)
        def wrapper(*args, **kwargs):
            return _load(*args, loader_func=loader_func, serializer_type=serializer_type, **kwargs)
        return wrapper
    return outer_wrapper
|
def create_api_pool_deploy(self):
    """Return an Api Pool Deploy services facade bound to this client's credentials."""
    return ApiPoolDeploy(
        self.networkapi_url,
        self.user,
        self.password,
        self.user_ldap,
    )
|
def run_breiman85():
    """Run the Breiman 85 sample problem and return the fitted ACE solver."""
    inputs, outcome = build_sample_ace_problem_breiman85(200)
    solver = ace.ACESolver()
    solver.specify_data_set(inputs, outcome)
    solver.solve()
    try:
        # Plotting requires an optional dependency; skip silently if missing.
        ace.plot_transforms(solver, 'sample_ace_breiman85.png')
    except ImportError:
        pass
    return solver
|
def buildEXPmask(self, chip, dqarr):
    """Build a float32 weight mask for this chip: the per-pixel exposure
    time array multiplied by the input DQ array.
    """
    log.info("Applying EXPTIME weighting to DQ mask for chip %s" % chip)
    exposure = self._image[self.scienceExt, chip]._exptime
    return (exposure * dqarr).astype(np.float32)
|
def check_allowed_letters(seq, allowed_letters_as_set):
    """Validate a sequence: raise ValidationError if it contains letters
    outside ``allowed_letters_as_set`` (compared upper-cased).
    """
    # Compare the unique letters of the sequence, upper-cased, with the allowed set.
    offending = [letter for letter in set(seq) if str(letter).upper() not in allowed_letters_as_set]
    if offending:
        raise forms.ValidationError("This sequence type cannot contain letters: " + ", ".join(offending))
|
def _groupname ( ) :
'''Grain for the minion groupname'''
|
if grp :
try :
groupname = grp . getgrgid ( os . getgid ( ) ) . gr_name
except KeyError :
groupname = ''
else :
groupname = ''
return groupname
|
def checkPrediction2(self, patternNZs, output=None, confidence=None, details=False):
    """Produce goodness-of-match scores for a set of input patterns by
    checking for their presence in the current and predicted output of the TP.

    This function will replace checkPrediction.

    Returns a global count of the number of extra and missing bits, the
    confidence scores for each input pattern, and (if requested) the bits in
    each input pattern that were not present in the TP's prediction.

    todo: Add option to check predictedState only.

    Parameters:
    --------------------------------------------------------------------
    patternNZs:  a list of input patterns to check for; each element is a
                 list of the non-zero indices in that pattern.
    output:      the output of the TP; if None, the TP's current output is
                 used. May be supplied to check the prediction metric for
                 an output from the past.
    confidence:  the cell confidences; if None, the TP's current
                 self.confidence is used. May be supplied to check the
                 prediction metrics for an output from the past.
    details:     if True, also include details of missing bits per pattern.

    Return value:
    --------------------------------------------------------------------
    (totalExtras, totalMissing, [conf_1, conf_2, ...][, [missing1, ...]])

    totalExtras:  global count of 'extra' bits, i.e. bits on in the current
                  output but not in the OR of all passed-in patterns
    totalMissing: global count of missing bits, i.e. bits on in the OR of
                  the patterns but not in the current output
    conf_i:       confidence tuple for the i'th pattern:
                  (predictionScore, positivePredictionScore,
                   negativePredictionScore)
    missing_i:    the bits of the i'th pattern missing from the output;
                  only returned when details is True.
    """
    # Get the non-zeros in each pattern
    numPatterns = len(patternNZs)

    # Compute the union of all the expected patterns
    orAll = set()
    orAll = orAll.union(*patternNZs)

    # Get the list of active columns in the output
    if output is None:
        assert self.currentOutput is not None
        output = self.currentOutput
    # Collapse the (columns x cells) array to the set of active column indices.
    # NOTE(review): assumes output is a 2-D array with cells on axis 1 -- confirm.
    output = set(output.sum(axis=1).nonzero()[0])

    # Compute the total extra and missing in the output
    totalExtras = len(output.difference(orAll))
    totalMissing = len(orAll.difference(output))

    # Get the percent confidence level per column by summing the confidence levels
    # of the cells in the column. During training, each segment's confidence
    # number is computed as a running average of how often it correctly
    # predicted bottom-up activity on that column. A cell's confidence number
    # is taken from the first active segment found in the cell. Note that
    # confidence will only be non-zero for predicted columns.
    if confidence is None:
        confidence = self.confidence['t']

    # Set the column confidence to be the max of the cell confidences in that
    # column.
    colConfidence = self.columnConfidences(confidence)

    # Assign confidences to each pattern
    confidences = []
    for i in xrange(numPatterns):
        # Sum of the column confidences for this pattern
        positivePredictionSum = colConfidence[patternNZs[i]].sum()
        # How many columns in this pattern
        positiveColumnCount = len(patternNZs[i])

        # Sum of all the column confidences
        totalPredictionSum = colConfidence.sum()
        # Total number of columns
        totalColumnCount = len(colConfidence)

        negativePredictionSum = totalPredictionSum - positivePredictionSum
        negativeColumnCount = totalColumnCount - positiveColumnCount

        # Compute the average confidence score per column for this pattern
        if positiveColumnCount != 0:
            positivePredictionScore = positivePredictionSum / positiveColumnCount
        else:
            positivePredictionScore = 0.0

        # Compute the average confidence score per column for the other patterns
        if negativeColumnCount != 0:
            negativePredictionScore = negativePredictionSum / negativeColumnCount
        else:
            negativePredictionScore = 0.0
        # Net score: how much more confident we are in this pattern's columns
        # than in all other columns on average.
        predictionScore = positivePredictionScore - negativePredictionScore
        confidences.append((predictionScore, positivePredictionScore, negativePredictionScore))

    # Include detail? (bits in each pattern that were missing from the output)
    if details:
        missingPatternBits = [set(pattern).difference(output) for pattern in patternNZs]
        return (totalExtras, totalMissing, confidences, missingPatternBits)
    else:
        return (totalExtras, totalMissing, confidences)
|
def decrypt_element(encrypted_data, key, debug=False, inplace=False):
    """Decrypts an encrypted element.

    :param encrypted_data: The encrypted data.
    :type: lxml.etree.Element | DOMElement | basestring
    :param key: The key (PEM-formatted key material).
    :type: string
    :param debug: Activate the xmlsec debug trace
    :type: bool
    :param inplace: update the passed data with the decrypted result instead
                    of working on a deep copy
    :type: bool
    :returns: The decrypted element.
    :rtype: lxml.etree.Element
    """
    if isinstance(encrypted_data, Element):
        # minidom DOM element: serialise and re-parse into an lxml etree.
        encrypted_data = OneLogin_Saml2_XML.to_etree(str(encrypted_data.toxml()))
    if not inplace and isinstance(encrypted_data, OneLogin_Saml2_XML._element_class):
        # Decrypt a copy so the caller's tree is left untouched.
        encrypted_data = deepcopy(encrypted_data)
    elif isinstance(encrypted_data, OneLogin_Saml2_XML._text_class):
        # Raw XML string/bytes: parse into an etree first.
        encrypted_data = OneLogin_Saml2_XML._parse_etree(encrypted_data)
    xmlsec.enable_debug_trace(debug)
    manager = xmlsec.KeysManager()
    # The key is loaded from memory as PEM; no password (None).
    manager.add_key(xmlsec.Key.from_memory(key, xmlsec.KeyFormat.PEM, None))
    enc_ctx = xmlsec.EncryptionContext(manager)
    return enc_ctx.decrypt(encrypted_data)
|
def get_tr(self):
    """Return the top right border of the cell.

    The top-right border is the right border of the cell directly above.
    """
    above = CellBorders(self.cell_attributes, *self.cell.get_above_key_rect())
    return above.get_r()
|
def delete(name, remove=False, force=False, root=None):
    '''
    Remove a user from the minion

    name
        Username to delete

    remove
        Remove home directory and mail spool

    force
        Force some actions that would fail otherwise

    root
        Directory to chroot into

    CLI Example:

    .. code-block:: bash

        salt '*' user.delete name remove=True force=True
    '''
    cmd = ['userdel']
    if remove:
        cmd.append('-r')
    # -f is not supported by userdel on OpenBSD or AIX.
    if force and __grains__['kernel'] != 'OpenBSD' and __grains__['kernel'] != 'AIX':
        cmd.append('-f')
    cmd.append(name)
    if root is not None and __grains__['kernel'] != 'AIX':
        cmd.extend(('-R', root))

    ret = __salt__['cmd.run_all'](cmd, python_shell=False)

    if ret['retcode'] == 0:
        # Command executed with no errors
        return True

    if ret['retcode'] == 12:
        # Known bug on Debian-based distributions makes userdel exit with 12:
        # https://bugs.launchpad.net/ubuntu/+source/shadow/+bug/1023509
        if __grains__['os_family'] not in ('Debian',):
            return False
        if 'var/mail' in ret['stderr'] or 'var/spool/mail' in ret['stderr']:
            # We've hit the bug; log it and treat the deletion as successful.
            log.debug(
                'While the userdel exited with code 12, this is a known bug on '
                'debian based distributions. See http://goo.gl/HH3FzT'
            )
            return True

    return False
|
def make_template_paths(template_file, paths=None):
    """Make up a list of template search paths from the given 'template_file'
    (absolute or relative path to the template file) and/or 'paths' (a list
    of template search paths given by the user).

    NOTE: User-given 'paths' take higher priority than the directory of
    template_file, which is always appended last.

    :param template_file: Absolute or relative path to the template file
    :param paths: A list of template search paths
    :return: List of template paths ([str])

    >>> make_template_paths("/path/to/a/template")
    ['/path/to/a']
    >>> make_template_paths("/path/to/a/template", ["/tmp"])
    ['/tmp', '/path/to/a']
    >>> os.chdir("/tmp")
    >>> make_template_paths("./path/to/a/template")
    ['/tmp/path/to/a']
    >>> make_template_paths("./path/to/a/template", ["/tmp"])
    ['/tmp', '/tmp/path/to/a']
    """
    template_dir = os.path.abspath(os.path.dirname(template_file))
    if paths is None:
        return [template_dir]
    return paths + [template_dir]
|
def _create_base_ensemble ( self , out , n_estimators , n_folds ) :
"""For each base estimator collect models trained on each fold"""
|
ensemble_scores = numpy . empty ( ( n_estimators , n_folds ) )
base_ensemble = numpy . empty_like ( ensemble_scores , dtype = numpy . object )
for model , fold , score , est in out :
ensemble_scores [ model , fold ] = score
base_ensemble [ model , fold ] = est
return ensemble_scores , base_ensemble
|
def dump_graph(self):
    """Dump a key-only representation of the schema to a dictionary.

    Every known relation becomes a key mapped to the list of keys that
    reference it.
    """
    # Hold the lock for the entire dump: if other threads modified
    # self.relations (or any cache entry's referenced_by) during iteration,
    # that would be a runtime error.
    with self.lock:
        graph = {}
        for key, cached in self.relations.items():
            graph[dot_separated(key)] = cached.dump_graph_entry()
        return graph
|
def lock(func):
    """Mutual-exclusion decorator: serialise calls to ``func`` with the
    module-level ``mutex``.
    """
    @functools.wraps(func)
    def _func(*args, **kwargs):
        # The context manager acquires before the call and always releases,
        # even when func raises.
        with mutex:
            return func(*args, **kwargs)
    return _func
|
def nested_insert(self, item_list):
    """Create a series of nested LIVVDicts from a list of keys."""
    if not item_list:
        return
    head, rest = item_list[0], item_list[1:]
    if not rest:
        # Leaf key: (re)initialise the entry with an empty LIVVDict.
        self[head] = LIVVDict()
    else:
        if head not in self:
            self[head] = LIVVDict()
        self[head].nested_insert(rest)
|
def home(self, n=1, interval=0, pre_dl=None, post_dl=None):
    """Press the home key n times.

    :param n: number of key presses
    :param interval: seconds between presses
    :param pre_dl: optional delay before pressing
    :param post_dl: optional delay after pressing
    """
    self.delay(pre_dl)
    self.k.tap_key(self.k.home_key, n, interval)
    self.delay(post_dl)
|
def is_allowed(self, user_agent, url, syntax=GYM2008):
    """True if the user agent is permitted to visit the URL.

    The syntax parameter can be GYM2008 (the default) or MK1996 for strict
    adherence to the traditional standard.
    """
    if PY_MAJOR_VERSION < 3:
        # The robot rules are stored internally as Unicode. Coerce byte-string
        # arguments explicitly so a failed conversion (default encoding,
        # usually US-ASCII) surfaces here rather than as a confusing implicit
        # UnicodeError deeper inside the comparison code.
        if not isinstance(user_agent, unicode):
            user_agent = user_agent.decode()
        if not isinstance(url, unicode):
            url = url.decode()

    if syntax not in (MK1996, GYM2008):
        _raise_error(ValueError, "Syntax must be MK1996 or GYM2008")

    for ruleset in self.__rulesets:
        if ruleset.does_user_agent_match(user_agent):
            return ruleset.is_url_allowed(url, syntax)
    # No ruleset matched this user agent: everything is allowed.
    return True
|
def if_then(self, classical_reg, if_program, else_program=None):
    """If the classical register at index classical_reg is 1, run if_program,
    else run else_program.

    Emits the construction::

        JUMP-WHEN @THEN [c]
        instrB...            # else branch
        JUMP @END
        LABEL @THEN
        instrA...            # if branch
        LABEL @END

    :param int classical_reg: The classical register to check as the condition
    :param Program if_program: A Quil program to execute if classical_reg is 1
    :param Program else_program: A Quil program to execute if classical_reg is 0.
        Optional; defaults to an empty Program.
    :returns: The Quil Program with the branching instructions added.
    :rtype: Program
    """
    if else_program is None:
        else_program = Program()

    then_label = LabelPlaceholder("THEN")
    end_label = LabelPlaceholder("END")
    # Jump over the else-branch when the register is set.
    self.inst(JumpWhen(target=then_label, condition=unpack_classical_reg(classical_reg)))
    self.inst(else_program)
    self.inst(Jump(end_label))
    self.inst(JumpTarget(then_label))
    self.inst(if_program)
    self.inst(JumpTarget(end_label))
    return self
|
def global_to_local(self, index):
    """Calculate local index from global index.

    :param index: input index; an int or slice for 1-D data, or a tuple of
                  int/slice entries (one per dimension) for N-D data
    :return: local index for data (or None when an int index maps to an
             element that is not local)
    :raises IndexError: when the index type or length does not match the mask
    """
    if (type(index) is int) or (type(index) is slice):
        # A scalar index only makes sense for 1-D data.
        if len(self.__mask) > 1:
            raise IndexError('check length of parameter index')
        # 1D array
        if type(index) is int:
            return self.int_global_to_local(index)
        elif type(index) is slice:
            return self.slice_global_to_local(index)
        else:
            raise IndexError('check data type of index to be integer or slice')
    elif type(index) is tuple:
        # if len(index) is not len(self.__mask):
        #     raise IndexError('check length of parameter index')
        local_index = []
        for k, item in enumerate(index):
            if k < len(self.__mask):
                if type(item) is slice:
                    temp_index = self.slice_global_to_local(item, k)
                elif type(item) in [int, np.int64, np.int32]:
                    temp_index = self.int_global_to_local(item, k)
                # int mapping may return None when the element is not local
                # to this process -- propagate that immediately.
                # NOTE(review): nesting reconstructed from flattened source;
                # confirm this check sits at this level, not inside the elif.
                if temp_index is None:
                    return temp_index
            else:
                # Dimensions beyond the mask pass through unchanged.
                temp_index = item
            local_index.append(temp_index)
        return tuple(local_index)
    else:
        raise IndexError('check index for correct length and type')
|
def _keygen ( self , event , ts = None ) :
"""Generate redis key for event at timestamp .
: param event : event name
: param ts : timestamp , default to current timestamp if left as None"""
|
return "%s:%s" % ( self . namespace ( ts or time . time ( ) ) , event )
|
def init(name, cpu, mem, image, hypervisor='kvm', host=None, seed=True, nic='default', install=True, start=True, disk='default', saltenv='base', enable_vnc=False, seed_cmd='seed.apply', enable_qcow=False, serial_type='None'):
    '''
    This routine is used to create a new virtual machine. This routine takes
    a number of options to determine what the newly created virtual machine
    will look like.

    name
        The mandatory name of the new virtual machine. The name option is
        also the minion id, all minions must have an id.

    cpu
        The number of cpus to allocate to this new virtual machine.

    mem
        The amount of memory to allocate to this virtual machine. The number
        is interpreted in megabytes.

    image
        The network location of the virtual machine image, commonly a location
        on the salt fileserver, but http, https and ftp can also be used.

    hypervisor
        The hypervisor to use for the new virtual machine. Default is `kvm`.

    host
        The host to use for the new virtual machine, if this is omitted
        Salt will automatically detect what host to use.

    seed
        Set to `False` to prevent Salt from seeding the new virtual machine.

    nic
        The nic profile to use, defaults to the "default" nic profile which
        assumes a single network interface per VM associated with the "br0"
        bridge on the master.

    install
        Set to False to prevent Salt from installing a minion on the new VM
        before it spins up.

    disk
        The disk profile to use

    saltenv
        The Salt environment to use

    enable_vnc
        Whether a VNC screen is attached to resulting VM. Default is `False`.

    seed_cmd
        If seed is `True`, use this execution module function to seed new VM.
        Default is `seed.apply`.

    enable_qcow
        Clone disk image as a copy-on-write qcow2 image, using downloaded
        `image` as backing file.

    serial_type
        Enable serial console. Set to 'pty' for serial console or 'tcp' for
        telnet.
        Default is 'None'
    '''
    __jid_event__.fire_event({'message': 'Searching for hosts'}, 'progress')
    data = query(host, quiet=True)
    # Check if the name is already deployed
    for node in data:
        if 'vm_info' in data[node]:
            if name in data[node]['vm_info']:
                __jid_event__.fire_event(
                    {'message': 'Virtual machine {0} is already deployed'.format(name)},
                    'progress')
                return 'fail'

    if host is None:
        host = _determine_host(data)

    if host not in data or not host:
        __jid_event__.fire_event(
            {'message': 'Host {0} was not found'.format(host)},
            'progress')
        return 'fail'

    pub_key = None
    priv_key = None
    if seed:
        # Pre-generate the minion keypair and pre-accept the public key so
        # the new minion can connect without manual key acceptance.
        __jid_event__.fire_event({'message': 'Minion will be preseeded'}, 'progress')
        priv_key, pub_key = salt.utils.cloud.gen_keys()
        accepted_key = os.path.join(__opts__['pki_dir'], 'minions', name)
        with salt.utils.files.fopen(accepted_key, 'w') as fp_:
            fp_.write(salt.utils.stringutils.to_str(pub_key))

    client = salt.client.get_local_client(__opts__['conf_file'])

    __jid_event__.fire_event(
        {'message': 'Creating VM {0} on host {1}'.format(name, host)},
        'progress')
    try:
        cmd_ret = client.cmd_iter(
            host,
            'virt.init',
            [name, cpu, mem],
            timeout=600,
            kwarg={
                'image': image,
                'nic': nic,
                'hypervisor': hypervisor,
                'start': start,
                'disk': disk,
                'saltenv': saltenv,
                'seed': seed,
                'install': install,
                'pub_key': pub_key,
                'priv_key': priv_key,
                'seed_cmd': seed_cmd,
                'enable_vnc': enable_vnc,
                'enable_qcow': enable_qcow,
                'serial_type': serial_type,
            })
    except SaltClientError as client_error:
        # Bug fix: this previously fell through to ``next(cmd_ret)`` with
        # ``cmd_ret`` unbound, masking the real failure behind a NameError.
        print(client_error)
        return 'fail'

    ret = next(cmd_ret)
    if not ret:
        __jid_event__.fire_event({'message': 'VM {0} was not initialized.'.format(name)}, 'progress')
        return 'fail'
    for minion_id in ret:
        if ret[minion_id]['ret'] is False:
            print('VM {0} initialization failed. Returned error: {1}'.format(name, ret[minion_id]['ret']))
            return 'fail'

    __jid_event__.fire_event(
        {'message': 'VM {0} initialized on host {1}'.format(name, host)},
        'progress')
    return 'good'
|
def _parse_tree_dump(text_dump):
    # type: (str) -> Optional[Dict[str, Any]]
    """Parse a text tree dump (one item of the list returned by
    Booster.get_dump()) into the json-like format used by later XGBoost
    releases.
    """
    result = None
    stack = []  # type: List[Dict]
    for line in text_dump.split('\n'):
        if not line:
            continue
        depth, node = _parse_dump_line(line)
        if depth == 0:
            # Root node of the tree.
            assert not stack
            result = node
        elif depth > len(stack):
            raise ValueError('Unexpected dump structure')
        else:
            if depth < len(stack):
                # Moved back up the tree: drop completed subtrees.
                stack = stack[:depth]
            stack[-1].setdefault('children', []).append(node)
        stack.append(node)
    return result
|
def QA_util_time_gap(time, gap, methods, type_):
    '''Resolve the minute-bar timestamp ``gap`` bars away from ``time``
    (gap helper for minute-bar backtesting).

    time
        'YYYY-MM-DD HH:MM:SS' timestamp string
    gap
        number of minute bars to move
    methods
        direction/comparison: '>', 'gt', '>=', 'gte', '<', 'lt',
        '<=', 'lte', '==', '=', 'eq'
    type_
        bar frequency string such as '1min' or '5min'
    '''
    # Bars per trading day for this frequency (240 trading minutes per day).
    min_len = int(240 / int(str(type_).split('min')[0]))
    # Number of trading days the gap can span, rounded up.
    day_gap = math.ceil(gap / min_len)
    # Position of time's trading day in the exchange calendar; this was
    # previously recomputed inside every branch.
    day_index = trade_date_sse.index(
        str(datetime.datetime.strptime(time, '%Y-%m-%d %H:%M:%S').date()))

    def _minute_frame(start, stop):
        # Concatenated per-day minute indexes for trade_date_sse[start:stop].
        return pd.concat(
            [pd.DataFrame(QA_util_make_min_index(day, type_))
             for day in trade_date_sse[start:stop]]
        ).reset_index()

    def _as_str_list(series):
        # Normalise the selected bar timestamps to a list of strings.
        return np.asarray(series.apply(lambda x: str(x))).tolist()

    if methods in ['>', 'gt']:
        data = _minute_frame(day_index, day_index + day_gap + 1)
        return _as_str_list(data[data[0] > time].head(gap)[0])[-1]
    elif methods in ['>=', 'gte']:
        data = _minute_frame(day_index, day_index + day_gap + 1)
        return _as_str_list(data[data[0] >= time].head(gap)[0])[-1]
    elif methods in ['<', 'lt']:
        data = _minute_frame(day_index - day_gap, day_index + 1)
        return _as_str_list(data[data[0] < time].tail(gap)[0])[0]
    elif methods in ['<=', 'lte']:
        data = _minute_frame(day_index - day_gap, day_index + 1)
        return _as_str_list(data[data[0] <= time].tail(gap)[0])[0]
    elif methods in ['==', '=', 'eq']:
        return time
|
def findlast(*args, **kwargs):
    """Find the last matching element in a list and return it.

    Usage::

        findlast(element, list_)
        findlast(of=element, in_=list_)
        findlast(where=predicate, in_=list_)

    :param element, of: Element to search for (by equality comparison)
    :param where: Predicate defining an element to search for.
                  This should be a callable taking a single argument
                  and returning a boolean result.
    :param list_, in_: List to search in

    :return: Last matching element
    :raise IndexError: If no matching elements were found

    .. versionadded:: 0.0.4
    """
    # Scan backwards from the end of the list.
    list_, idx = _index(*args, start=sys.maxsize, step=-1, **kwargs)
    if idx >= 0:
        return list_[idx]
    raise IndexError("element not found")
|
def conpair_general_stats_table(self):
    """Add the parsed Conpair stats to the basic stats table at the top of
    the report.
    """
    headers = {
        'concordance_concordance': {
            'title': 'Concordance',
            'max': 100,
            'min': 0,
            'suffix': '%',
            'format': '{:,.2f}',
            'scale': 'RdYlGn',
        },
        'contamination_normal': {
            'title': 'N Contamination',
            'description': 'Normal sample contamination level',
            'max': 100,
            'min': 0,
            'suffix': '%',
            'format': '{:,.3f}',
            'scale': 'RdYlBu-rev',
        },
        'contamination_tumor': {
            'title': 'T Contamination',
            'description': 'Tumor sample contamination level',
            'max': 100,
            'min': 0,
            'suffix': '%',
            'format': '{:,.3f}',
            'scale': 'RdYlBu-rev',
        },
    }
    self.general_stats_addcols(self.conpair_data, headers)
|
def delete_post(self, blogname, id):
    """Delete the post with the given id.

    :param blogname: a string, the url of the blog you want to delete from
    :param id: an int, the post id that you want to delete

    :returns: a dict created from the JSON response
    """
    endpoint = "/v2/blog/{}/post/delete".format(blogname)
    return self.send_api_request('post', endpoint, {'id': id}, ['id'])
|
def copy_tree(src, dst, symlinks=False, ignore=None):
    """Copy a full directory structure.

    :param src: Source path
    :param dst: Destination path
    :param symlinks: Recreate symlinks instead of copying their targets
    :param ignore: Iterable of subdir/file names to skip (default: none)
    :raises CTError: with the aggregated list of (src, dst, message)
                     failures collected while copying
    """
    # Bug fix: the default used to be a mutable list literal (ignore=[]),
    # which is the classic shared-default-argument anti-pattern. None is the
    # backward-compatible sentinel for "ignore nothing".
    if ignore is None:
        ignore = []
    names = os.listdir(src)
    if not os.path.exists(dst):
        os.makedirs(dst)
    errors = []
    for name in names:
        if name in ignore:
            continue
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if symlinks and os.path.islink(srcname):
                # Recreate the symlink rather than copying its target.
                linkto = os.readlink(srcname)
                os.symlink(linkto, dstname)
            elif os.path.isdir(srcname):
                copy_tree(srcname, dstname, symlinks, ignore)
            else:
                copy_file(srcname, dstname)
        except (IOError, os.error) as exc:
            errors.append((srcname, dstname, str(exc)))
        except CTError as exc:
            # Flatten errors raised by the recursive call.
            errors.extend(exc.errors)
    if errors:
        raise CTError(errors)
|
def on_next_request(self, py_db, request):
    '''Handle a debug-adapter "next" (step over) request.

    :param NextRequest request:
    '''
    arguments = request.arguments  # :type arguments: NextArguments
    thread_id = arguments.threadId
    # Honour the "just my code" libraries filter when choosing the step command.
    step_cmd_id = CMD_STEP_OVER_MY_CODE if py_db.get_use_libraries_filter() else CMD_STEP_OVER
    self.api.request_step(py_db, thread_id, step_cmd_id)
    response = pydevd_base_schema.build_response(request)
    return NetCommand(CMD_RETURN, 0, response, is_json=True)
|
def _build_document_converter(cls, session: AppSession):
    '''Build the Document Converter, or None when link conversion is disabled.'''
    if not session.args.convert_links:
        return None
    return session.factory.new(
        'BatchDocumentConverter',
        session.factory['HTMLParser'],
        session.factory['ElementWalker'],
        session.factory['URLTable'],
        backup=session.args.backup_converted,
    )
|
def waiver2mef(sciname, newname=None, convert_dq=True, writefits=True):
    """Convert a GEIS/waivered-FITS science file and its corresponding data
    quality file (if present) to multi-extension FITS (MEF) format.

    Writes both files to disk when ``writefits`` is True and returns a
    writable handle to the new science FITS file.

    :param sciname: input filename, or an already-open ``fits.HDUList``
    :param newname: not referenced in this body -- presumably retained for
                    interface compatibility; confirm against callers
    :param convert_dq: also convert the associated ``_c1f`` data quality file
    :param writefits: write the converted image(s) to disk
    :returns: HDUList for the converted science image, or None when the
              input file could not be found (IOError)
    """
    if isinstance(sciname, fits.HDUList):
        filename = sciname.filename()
    else:
        filename = sciname
    try:
        clobber = True
        fimg = convertwaiveredfits.convertwaiveredfits(filename)

        # check for the existence of a data quality file
        _dqname = fileutil.buildNewRootname(filename, extn='_c1f.fits')
        dqexists = os.path.exists(_dqname)
        if convert_dq and dqexists:
            try:
                dqfile = convertwaiveredfits.convertwaiveredfits(_dqname)
                dqfitsname = fileutil.buildNewRootname(_dqname, extn='_c1h.fits')
            except Exception:
                print("Could not read data quality file %s" % _dqname)
        if writefits:
            # User wants to make a FITS copy and update it
            # using the filename they have provided
            rname = fileutil.buildNewRootname(filename)
            fitsname = fileutil.buildNewRootname(rname, extn='_c0h.fits')

            # Write out GEIS image as multi-extension FITS.
            fexists = os.path.exists(fitsname)
            if (fexists and clobber) or not fexists:
                print('Writing out WAIVERED as MEF to ', fitsname)
                # astropy >= 1.3 renamed the 'clobber' keyword to 'overwrite'.
                if ASTROPY_VER_GE13:
                    fimg.writeto(fitsname, overwrite=clobber)
                else:
                    fimg.writeto(fitsname, clobber=clobber)
                if dqexists:
                    # NOTE(review): when convert_dq is False, or the DQ
                    # conversion above failed, 'dqfile'/'dqfitsname' are
                    # unbound here and this raises NameError -- confirm
                    # whether that path is reachable in practice.
                    print('Writing out WAIVERED as MEF to ', dqfitsname)
                    if ASTROPY_VER_GE13:
                        dqfile.writeto(dqfitsname, overwrite=clobber)
                    else:
                        dqfile.writeto(dqfitsname, clobber=clobber)
            # Now close input GEIS image, and open writable
            # handle to output FITS image instead...
            fimg.close()
            del fimg
            fimg = fits.open(fitsname, mode='update', memmap=False)
        return fimg
    except IOError:
        print('Warning: File %s could not be found' % sciname)
        return None
|
def get_json_or_yaml(file_path, content):
    """Serialise ``content`` as JSON or YAML depending on the extension of
    ``file_path`` (JSON for '.json', YAML otherwise).
    """
    _, extension = os.path.splitext(file_path)
    if extension == ".json":
        return json.dumps(content, sort_keys=False, indent=4, separators=(',', ': '))
    return inginious.common.custom_yaml.dump(content)
|
def matched(self, other):
    """Return True if the two ValueElement instances differ only by name,
    default value or some other inconsequential modifier.
    """
    # Only these modifiers are significant when comparing elements.
    significant = ("allocatable", "pointer")
    if self.kind.lower() != other.kind.lower():
        return False
    if self.dtype.lower() != other.dtype.lower():
        return False
    if self.D != other.D:
        return False
    # Every significant modifier of self must also be present on other.
    return all(m in other.modifiers for m in self.modifiers if m in significant)
|
def dispatch_command(self, command, stage):
    """Execute *command* against the given deployment *stage*.

    Sets the stage, loads settings, then routes to the matching handler
    method (deploy, update, rollback, invoke, manage, tail, ...).

    Args:
        command (str): CLI sub-command name to run.
        stage (str): API stage (environment) the command applies to.
    """
    self.api_stage = stage
    # 'status' and 'manage' produce their own output; everything else gets
    # a human-readable banner unless JSON output was requested.
    if command not in ['status', 'manage']:
        if not self.vargs.get('json', None):
            click.echo("Calling " + click.style(command, fg="green", bold=True) + " for stage " + click.style(self.api_stage, bold=True) + "..")
    # Explicitly define the app function.
    # Related: https://github.com/Miserlou/Zappa/issues/832
    if self.vargs.get('app_function', None):
        self.app_function = self.vargs['app_function']
    # Load our settings, based on api_stage.
    try:
        self.load_settings(self.vargs.get('settings_file'))
    except ValueError as e:
        # Python 2 exceptions carry .message; fall back to str() otherwise.
        if hasattr(e, 'message'):
            print("Error: {}".format(e.message))
        else:
            print(str(e))
        sys.exit(-1)
    self.callback('settings')
    # Hand it off to the per-command handlers.
    # NOTE(review): the first three checks are independent `if`s rather than
    # one `elif` chain; behavior is unchanged since `command` is one value.
    if command == 'deploy':  # pragma: no cover
        self.deploy(self.vargs['zip'])
    if command == 'package':  # pragma: no cover
        self.package(self.vargs['output'])
    if command == 'template':  # pragma: no cover
        self.template(self.vargs['lambda_arn'], self.vargs['role_arn'], output=self.vargs['output'], json=self.vargs['json'])
    elif command == 'update':  # pragma: no cover
        self.update(self.vargs['zip'], self.vargs['no_upload'])
    elif command == 'rollback':  # pragma: no cover
        self.rollback(self.vargs['num_rollback'])
    elif command == 'invoke':  # pragma: no cover
        if not self.vargs.get('command_rest'):
            print("Please enter the function to invoke.")
            return
        self.invoke(self.vargs['command_rest'], raw_python=self.vargs['raw'], no_color=self.vargs['no_color'], )
    elif command == 'manage':  # pragma: no cover
        if not self.vargs.get('command_rest'):
            print("Please enter the management command to invoke.")
            return
        if not self.django_settings:
            print("This command is for Django projects only!")
            print("If this is a Django project, please define django_settings in your zappa_settings.")
            return
        command_tail = self.vargs.get('command_rest')
        if len(command_tail) > 1:
            # Quoted multi-word command, ex: zappa manage dev "shell --version"
            command = " ".join(command_tail)
        else:
            # Single token, ex: zappa manage dev showmigrations admin
            command = command_tail[0]
        self.invoke(command, command="manage", no_color=self.vargs['no_color'], )
    elif command == 'tail':  # pragma: no cover
        self.tail(colorize=(not self.vargs['no_color']), http=self.vargs['http'], non_http=self.vargs['non_http'], since=self.vargs['since'], filter_pattern=self.vargs['filter'], force_colorize=self.vargs['force_color'] or None, keep_open=not self.vargs['disable_keep_open'])
    elif command == 'undeploy':  # pragma: no cover
        self.undeploy(no_confirm=self.vargs['yes'], remove_logs=self.vargs['remove_logs'])
    elif command == 'schedule':  # pragma: no cover
        self.schedule()
    elif command == 'unschedule':  # pragma: no cover
        self.unschedule()
    elif command == 'status':  # pragma: no cover
        self.status(return_json=self.vargs['json'])
    elif command == 'certify':  # pragma: no cover
        self.certify(no_confirm=self.vargs['yes'], manual=self.vargs['manual'])
    elif command == 'shell':  # pragma: no cover
        self.shell()
|
def has_permission(obj_name, principal, permission, access_mode='grant', obj_type='file', exact=True):
    r'''Check if an object grants (or denies) a permission to a principal.

    Args:
        obj_name (str): The name of or path to the object.
        principal (str): The name of the user or group for which to get
            permissions. Can also pass a SID.
        permission (str): The permission to verify. Valid options depend on
            the obj_type.
        access_mode (Optional[str]): The access mode to check. Is the user
            granted or denied the permission. Default is 'grant'. Valid
            options are:
            - grant
            - deny
        obj_type (Optional[str]): The type of object for which to check
            permissions. Default is 'file'.
        exact (Optional[bool]): True for an exact match, otherwise check to
            see if the permission is included in the ACE. Default is True.

    Returns:
        bool: True if the object has the permission, otherwise False

    Usage:

    .. code-block:: python

        # Does Joe have read permissions to C:\Temp
        salt.utils.win_dacl.has_permission(
            'C:\\Temp', 'joe', 'read', 'grant', False)

        # Does Joe have Full Control of C:\Temp
        salt.utils.win_dacl.has_permission(
            'C:\\Temp', 'joe', 'full_control', 'grant')
    '''
    # Validate access_mode
    if access_mode.lower() not in ['grant', 'deny']:
        raise SaltInvocationError('Invalid "access_mode" passed: {0}'.format(access_mode))
    access_mode = access_mode.lower()
    # Get the DACL
    obj_dacl = dacl(obj_name, obj_type)
    obj_type = obj_type.lower()
    # Get a PySID object
    sid = get_sid(principal)
    # Resolve the permission name to its bit flag: try 'basic' permissions
    # first, then 'advanced'; .get() yields False for an unknown name.
    chk_flag = obj_dacl.ace_perms[obj_type]['basic'].get(permission.lower(), obj_dacl.ace_perms[obj_type]['advanced'].get(permission.lower(), False))
    if not chk_flag:
        raise SaltInvocationError('Invalid "permission" passed: {0}'.format(permission))
    # Scan every ACE; when several ACEs match the SID and access mode, the
    # access mask of the LAST match wins (cur_flag is overwritten each time).
    # NOTE(review): ace tuple layout assumed per pywin32 GetAce —
    # ((ace_type, ace_flags), access_mask, sid); confirm against pywin32 docs.
    cur_flag = None
    for i in range(0, obj_dacl.dacl.GetAceCount()):
        ace = obj_dacl.dacl.GetAce(i)
        if ace[2] == sid and obj_dacl.ace_type[ace[0][0]] == access_mode:
            cur_flag = ace[1]
    # No matching ACE found (this also treats an all-zero access mask as
    # "no permission")
    if not cur_flag:
        return False
    # Check if the ACE contains the exact flag
    if exact:
        return cur_flag == chk_flag
    # Check if the ACE contains the permission (all requested bits set)
    return cur_flag & chk_flag == chk_flag
|
def search_stack_for_var(varname, verbose=util_arg.NOT_QUIET):
    """Find a variable (local or global) somewhere in the call stack.

    Walks parent frames starting from the immediate caller, checking each
    frame's locals before its globals, and returns the first value found.

    Args:
        varname (str): variable name to look for
        verbose (bool): print progress messages while searching

    Returns:
        The value bound to *varname* in the nearest enclosing frame, or
        None if it was not found.
    """
    curr_frame = inspect.currentframe()
    if verbose:
        print(' * Searching parent frames for: ' + six.text_type(varname))
    frame_no = 0
    # NOTE(review): the loop stops once f_back is None, so the outermost
    # (root) frame itself is never examined — preserved as-is since callers
    # may rely on it.
    while curr_frame.f_back is not None:
        # Locals shadow globals within the same frame, so check them first.
        # (Membership test directly on the mapping; no need for .keys().)
        if varname in curr_frame.f_locals:
            if verbose:
                print(' * Found local in frame: ' + six.text_type(frame_no))
            return curr_frame.f_locals[varname]
        if varname in curr_frame.f_globals:
            if verbose:
                print(' * Found global in frame: ' + six.text_type(frame_no))
            return curr_frame.f_globals[varname]
        frame_no += 1
        curr_frame = curr_frame.f_back
    if verbose:
        print('... Found nothing in all ' + six.text_type(frame_no) + ' frames.')
    return None
|
def enable(self, license_key, license_token, sandbox_type='cloud_sandbox', service='Automatic', http_proxy=None, sandbox_data_center='Automatic'):
    """Enable sandbox on this engine using a valid license key and token
    obtained from your engine licensing.

    Requires SMC version >= 6.3.

    .. note:: Cloud sandbox is a feature that requires an engine license.

    :param str license_key: license key for specific engine
    :param str license_token: license token for specific engine
    :param str sandbox_type: 'local_sandbox' or 'cloud_sandbox'
    :param str,SandboxService service: a sandbox service element from SMC; the
        service defines which location the engine is in and which data centers
        to use. The default is the 'US Data Centers' profile if undefined.
    :param str,SandboxDataCenter sandbox_data_center: sandbox data center to
        use when the named service does not exist. Requires SMC >= 6.4.3
    :return: None
    """
    # Try to resolve an existing service element first; fall back to
    # creating one bound to the requested data center.
    resolved = element_resolver(SandboxService(service), do_raise=False)
    if not resolved:
        new_service = SandboxService.create(name=service, sandbox_data_center=SandboxDataCenter(sandbox_data_center))
        resolved = element_resolver(new_service)
    self.update(sandbox_license_key=license_key, sandbox_license_token=license_token, sandbox_service=resolved, http_proxy=get_proxy(http_proxy))
    # Merge the sandbox settings into the engine element and record the type.
    self.engine.data.setdefault('sandbox_settings', {}).update(self.data)
    self.engine.data.update(sandbox_type=sandbox_type)
|
def can_overlap(self, contig, strand=None):
    """Is this locus on the same contig and (optionally) on the same strand?"""
    if not self.on_contig(contig):
        return False
    # When no strand is requested, contig agreement alone is sufficient.
    return strand is None or self.on_strand(strand)
|
def insert(self, key):
    """Insert *key* into the subtree rooted here, rebalancing afterwards."""
    fresh = TreeNode(key)
    if not self.node:
        # Empty subtree: adopt the new node and give it empty child trees.
        self.node = fresh
        self.node.left = AvlTree()
        self.node.right = AvlTree()
    else:
        current = self.node.val
        if key < current:
            self.node.left.insert(key)
        elif key > current:
            self.node.right.insert(key)
        # Equal keys are silently ignored (no duplicates stored).
    self.re_balance()
|
def hyphenation(phrase, format='json'):
    """Return the stress points (hyphenation) of the given *phrase*.

    :param phrase: word for which hyphenation is to be found
    :param format: response structure type. Defaults to: "json"
    :returns: a response in the requested format, or False when the lookup
        yields nothing / the phrase is invalid
    """
    base_url = Vocabulary.__get_api_link("wordnik")
    endpoint = base_url.format(word=phrase.lower(), action="hyphenation")
    payload = Vocabulary.__return_json(endpoint)
    if not payload:
        return False
    return Response().respond(payload, format)
|
def _get_qvm_qc(name: str, qvm_type: str, device: AbstractDevice, noise_model: NoiseModel = None, requires_executable: bool = False, connection: ForestConnection = None) -> QuantumComputer:
    """Construct a QuantumComputer backed by a QVM.

    This is a minimal wrapper over the QuantumComputer, QVM, and QVMCompiler
    constructors.

    :param name: A string identifying this particular quantum computer.
    :param qvm_type: The type of QVM. Either qvm or pyqvm.
    :param device: A device following the AbstractDevice interface.
    :param noise_model: An optional noise model
    :param requires_executable: Whether this QVM will refuse to run a
        :py:class:`Program` and only accept the result of
        :py:func:`compiler.native_quil_to_executable`. Setting this to True
        better emulates the behavior of a QPU.
    :param connection: An optional :py:class:`ForestConnection` object. If not
        specified, the default values for URL endpoints will be used.
    :return: A QuantumComputer backed by a QVM with the above options.
    """
    # Fall back to the default endpoints when no connection was supplied.
    active_connection = connection if connection is not None else ForestConnection()
    qam = _get_qvm_or_pyqvm(qvm_type=qvm_type, connection=active_connection, noise_model=noise_model, device=device, requires_executable=requires_executable)
    compiler = QVMCompiler(device=device, endpoint=active_connection.compiler_endpoint)
    return QuantumComputer(name=name, qam=qam, device=device, compiler=compiler)
|
def loadSignalFromWav(inputSignalFile, calibrationRealWorldValue=None, calibrationSignalFile=None, start=None, end=None) -> Signal:
    """Read a wav file into a Signal, optionally rescaling the samples to
    real-world values as defined by a calibration signal.

    :param inputSignalFile: a path to the input signal file
    :param calibrationRealWorldValue: the real world value represented by the
        calibration signal's peak sample
    :param calibrationSignalFile: a path to the calibration signal file; when
        omitted the input is returned unscaled
    :param start: optional read start position, passed through to readWav
    :param end: optional read end position, passed through to readWav
    :returns: a Signal
    """
    raw = readWav(inputSignalFile, start=start, end=end)
    if calibrationSignalFile is None:
        return raw
    # Scale so the calibration peak maps onto the supplied real-world value.
    calibration = readWav(calibrationSignalFile)
    scale = calibrationRealWorldValue / np.max(calibration.samples)
    return Signal(raw.samples * scale, raw.fs)
|
def get_minimal_syspath(self, absolute_paths=True):
    """Provide a list of directories that, when added to sys.path, would
    enable any of the discovered python modules to be found.
    """
    # Parent directories of the shallowest packages: a subpackage is reached
    # through its ancestor, so only the top package of each tree contributes
    # its parent directory.
    pkg_paths = [path for path, flag in self._packages if not flag]
    pkg_parents = set()
    for pkg in sorted(pkg_paths, key=len):
        parent = os.path.split(pkg)[0]
        if parent not in pkg_paths and parent not in pkg_parents:
            pkg_parents.add(parent)
    # Directories holding stand-alone modules that live outside any package.
    mod_dirs = []
    for mod, flag in self._modules:
        if flag:
            continue
        containing_dir = os.path.dirname(mod)
        if containing_dir not in pkg_paths:
            mod_dirs.append(containing_dir)
    # Shortest paths first; the root path is always included.
    result = sorted(set(mod_dirs) | pkg_parents | {self.rootpath}, key=len)
    if absolute_paths:
        result = [os.path.join(self.rootpath, p).rstrip(os.path.sep) for p in result]
    return result
|
def _PrintCheckDependencyStatus ( self , dependency , result , status_message , verbose_output = True ) :
"""Prints the check dependency status .
Args :
dependency ( DependencyDefinition ) : dependency definition .
result ( bool ) : True if the Python module is available and conforms to
the minimum required version , False otherwise .
status _ message ( str ) : status message .
verbose _ output ( Optional [ bool ] ) : True if output should be verbose ."""
|
if not result or dependency . is_optional :
if dependency . is_optional :
status_indicator = '[OPTIONAL]'
else :
status_indicator = '[FAILURE]'
print ( '{0:s}\t{1:s}' . format ( status_indicator , status_message ) )
elif verbose_output :
print ( '[OK]\t\t{0:s}' . format ( status_message ) )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.