signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def invite_user_view(self):
    """Allows users to send invitations to register an account.

    GET renders the invite form; POST validates it, stores a UserInvitation,
    emails the invitee, and redirects to a safe "after invite" URL.
    """
    invite_user_form = self.InviteUserFormClass(request.form)
    if request.method == 'POST' and invite_user_form.validate():
        # Find User and UserEmail by email; refuse to invite an address
        # that already belongs to a registered user.
        email = invite_user_form.email.data
        user, user_email = self.db_manager.get_user_and_user_email_by_email(email)
        if user:
            flash("User with that email has already registered", "error")
            return redirect(url_for('user.invite_user'))
        # Add UserInvitation and persist it before attempting delivery so the
        # email manager has a stored object to reference.
        user_invitation = self.db_manager.add_user_invitation(email=email, invited_by_user_id=current_user.id)
        self.db_manager.commit()
        try:
            # Send invite_user email
            self.email_manager.send_invite_user_email(current_user, user_invitation)
        except Exception as e:
            # delete new UserInvitation object if send fails, then re-raise so
            # the caller still sees the original failure.
            self.db_manager.delete_object(user_invitation)
            self.db_manager.commit()
            raise
        # Send sent_invitation signal so listeners can react to the invite.
        signals.user_sent_invitation.send(current_app._get_current_object(), user_invitation=user_invitation, form=invite_user_form)
        # Flash a system message
        flash(_('Invitation has been sent.'), 'success')
        # Redirect only to a vetted URL; falls back to the configured endpoint.
        safe_next_url = self._get_safe_next_url('next', self.USER_AFTER_INVITE_ENDPOINT)
        return redirect(safe_next_url)
    # GET (or failed validation): render the invite form.
    self.prepare_domain_translations()
    return render_template(self.USER_INVITE_USER_TEMPLATE, form=invite_user_form)
|
def rename_fields(layer, fields_to_copy):
    """Rename fields inside an attribute table.

    Only since QGIS 2.16.

    :param layer: The vector layer.
    :type layer: QgsVectorLayer

    :param fields_to_copy: Dictionary mapping existing field names to their
        new names.
    :type fields_to_copy: dict
    """
    for field in fields_to_copy:
        index = layer.fields().lookupField(field)
        if index != -1:
            # Each rename gets its own edit session so a failure on one
            # field does not discard earlier renames.
            layer.startEditing()
            layer.renameAttribute(index, fields_to_copy[field])
            layer.commitChanges()
            LOGGER.info(
                'Renaming field %s to %s' % (field, fields_to_copy[field]))
        else:
            # Fixed log message grammar ("trying to renaming" -> "trying to
            # rename").
            LOGGER.info(
                'Field %s not present in the layer while trying to rename '
                'it to %s' % (field, fields_to_copy[field]))
|
def sprite_filepath_build(sprite_type, sprite_id, **kwargs):
    """Return the sprite's file path *relative to SPRITE_CACHE*."""
    option_dirs = parse_sprite_options(sprite_type, **kwargs)
    sprite_name = '{0}.{1}'.format(sprite_id, SPRITE_EXT)
    return os.path.join(sprite_type, *option_dirs, sprite_name)
|
def stack_smooth(s_orig, size=7, save=False):
    """Run Gaussian smoothing filter on existing stack object"""
    from copy import deepcopy
    from pygeotools.lib import filtlib
    print("Copying original DEMStack")
    # Work on a deep copy so the caller's stack is left untouched.
    s = deepcopy(s_orig)
    # Derive the output filename from the original, tagging the filter size.
    s.stack_fn = os.path.splitext(s_orig.stack_fn)[0] + '_smooth%ipx.npz' % size
    # Loop through each array and smooth
    print("Smoothing all arrays in stack with %i px gaussian filter" % size)
    for i in range(s.ma_stack.shape[0]):
        print('%i of %i' % (i + 1, s.ma_stack.shape[0]))
        s.ma_stack[i] = filtlib.gauss_fltr_astropy(s.ma_stack[i], size=size)
    # Recompute derived products that depend on the now-smoothed arrays.
    if s.stats:
        s.compute_stats()
        if save:
            s.write_stats()
    # Update datestack
    if s.datestack and s.date_list_o.count() > 1:
        s.compute_dt_stats()
        if save:
            s.write_datestack()
    # Update trend
    if s.trend:
        s.compute_trend()
        if save:
            s.write_trend()
    if save:
        s.savestack()
    return s
|
def clean():
    """Remove the configured Selenium screenshot directory, if any exists."""
    target = settings.SELENIUM_SCREENSHOT_DIR
    if not target:
        return
    if os.path.isdir(target):
        rmtree(target, ignore_errors=True)
|
def list_upgrades(refresh=True, **kwargs):
    '''
    List those packages for which an upgrade is available

    The ``fromrepo`` argument is also supported, as used in pkg states.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.list_upgrades

    jail
        List upgrades within the specified jail

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.list_upgrades jail=<jail name or id>

    chroot
        List upgrades within the specified chroot (ignored if ``jail`` is
        specified)

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.list_upgrades chroot=/path/to/chroot

    root
        List upgrades within the specified root (ignored if ``jail`` is
        specified)

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.list_upgrades root=/path/to/chroot
    '''
    # Pull the pkg-state options out of kwargs before building the command.
    jail, chroot, root, fromrepo = (
        kwargs.pop(key, None) for key in ('jail', 'chroot', 'root', 'fromrepo'))

    cmd = _pkg(jail, chroot, root)
    cmd += ['upgrade', '--dry-run', '--quiet']
    if not refresh:
        cmd += ['--no-repo-update']
    if fromrepo:
        cmd += ['--repository', fromrepo]

    out = __salt__['cmd.run_stdout'](
        cmd, output_loglevel='trace', python_shell=False, ignore_retcode=True)

    upgrades = _parse_upgrade(out)['upgrade']
    result = {}
    for pkgname, pkgstat in six.iteritems(upgrades):
        result[pkgname] = pkgstat['version']['new']
    return result
|
def _cursorRight(self):
    """Handles "cursor right" events by advancing the cursor one column."""
    # Never move past the end of the current input buffer.
    if self.cursorPos >= len(self.inputBuffer):
        return
    self.cursorPos += 1
    sys.stdout.write(console.CURSOR_RIGHT)
    sys.stdout.flush()
|
def set_timestamp(self, data):
    """Interpret time-related options, applying the queue-time parameter as needed.

    'hittime' carries an absolute timestamp and 'hitage' a relative age in
    seconds; each present key is consumed from the payload and converted
    into a 'qt' (queue-time) value via self.hittime.
    """
    for key, kwarg in (('hittime', 'timestamp'), ('hitage', 'age')):
        if key in data:
            data['qt'] = self.hittime(**{kwarg: data.pop(key, None)})
|
def _clean_accents ( self , text ) :
"""Remove most accent marks .
Note that the circumflexes over alphas and iotas in the text since
they determine vocalic quantity .
: param text : raw text
: return : clean text with minimum accent marks
: rtype : string"""
|
accents = { 'ὲέἐἑἒἓἕἔ' : 'ε' , 'ὺύὑὐὒὓὔὕ' : 'υ' , 'ὸόὀὁὂὃὄὅ' : 'ο' , 'ὶίἰἱἲἳἵἴ' : 'ι' , 'ὰάἁἀἂἃἅἄᾳᾂᾃ' : 'α' , 'ὴήἠἡἢἣἥἤἧἦῆῄῂῇῃᾓᾒᾗᾖᾑᾐ' : 'η' , 'ὼώὠὡὢὣὤὥὦὧῶῲῴῷῳᾧᾦᾢᾣᾡᾠ' : 'ω' , 'ἶἷ' : 'ῖ' , 'ἆἇᾷᾆᾇ' : 'ᾶ' , 'ὖὗ' : 'ῦ' , }
text = self . _clean_text ( text )
for char in text :
for key in accents . keys ( ) :
if char in key :
text = text . replace ( char , accents . get ( key ) )
else :
pass
return text
|
def WriteStatEntries(stat_entries, client_id, mutation_pool, token=None):
    """Persists information about stat entries.

    Args:
      stat_entries: A list of `StatEntry` instances.
      client_id: An id of a client the stat entries come from.
      mutation_pool: A mutation pool used for writing into the AFF4 data store.
      token: A token used for writing into the AFF4 data store.
    """
    for stat_response in stat_entries:
        if stat_response.pathspec.last.stream_name:
            # This is an ads. In that case we always need to create a file or
            # we won't be able to access the data. New clients send the correct
            # mode already but to make sure, we set this to a regular file
            # anyways.
            # Clear all file type bits:
            stat_response.st_mode &= ~stat_type_mask
            stat_response.st_mode |= stat.S_IFREG
    if data_store.AFF4Enabled():
        for stat_entry in stat_entries:
            CreateAFF4Object(stat_entry, client_id_urn=rdf_client.ClientURN(client_id), mutation_pool=mutation_pool, token=token)
    if data_store.RelationalDBEnabled():
        path_infos = [rdf_objects.PathInfo.FromStatEntry(s) for s in stat_entries]
        # NOTE: TSK may return duplicate entries. This is may be either due to
        # a bug in TSK implementation, or due to the fact that TSK is capable
        # of returning deleted files information. Our VFS data model only
        # supports storing multiple versions of the files when we collect the
        # versions ourselves. At the moment we can't store multiple versions
        # of the files "as returned by TSK".
        # Current behaviour is to simply drop excessive version before the
        # WritePathInfo call. This way files returned by TSK will still make
        # it into the flow's results, but not into the VFS data.
        data_store.REL_DB.WritePathInfos(client_id, _FilterOutPathInfoDuplicates(path_infos))
|
def _get_proxies(self):
    """Returns the :class:`ProxyJSON <odoorpc.rpc.jsonrpclib.ProxyJSON>`
    and :class:`ProxyHTTP <odoorpc.rpc.jsonrpclib.ProxyHTTP>` instances
    corresponding to the server version used.
    """
    proxy_json = jsonrpclib.ProxyJSON(self.host, self.port, self._timeout, ssl=self.ssl, deserialize=self.deserialize, opener=self._opener)
    proxy_http = jsonrpclib.ProxyHTTP(self.host, self.port, self._timeout, ssl=self.ssl, opener=self._opener)
    # Detect the server version lazily: only query the server when the
    # version is not already known, and keep it cached on self.
    if self.version is None:
        result = proxy_json('/web/webclient/version_info')['result']
        if 'server_version' in result:
            self.version = result['server_version']
    return proxy_json, proxy_http
|
def result(self):
    """Format the accumulated count/total as a summary dict with an average.

    The average is 0 when no values have been counted, avoiding a
    division-by-zero.
    """
    count = self._count
    total = self._total
    average = float(total) / count if count else 0
    return {"count": count, "total": total, "average": average}
|
def instance_name(string):
    """argparse type-checker that rejects names containing ':', '/' or '@'."""
    for forbidden in ':/@':
        if forbidden in string:
            raise argparse.ArgumentTypeError(
                'Invalid instance name {}'.format(string))
    return string
|
def apply_scaling(data, dicom_headers):
    """Rescale the data based on the RescaleSlope and RescaleIntercept.

    Based on the scaling from pydicomseries.

    :param dicom_headers: dicom headers to use to retrieve the scaling factors
    :param data: the input data
    """
    # Apply the rescaling if needed. The Philips private scale tags are only
    # used to *detect* that scaling applies; they are not read (see note).
    private_scale_slope_tag = Tag(0x2005, 0x100E)
    private_scale_intercept_tag = Tag(0x2005, 0x100D)
    if ('RescaleSlope' in dicom_headers or 'RescaleIntercept' in dicom_headers
            or private_scale_slope_tag in dicom_headers
            or private_scale_intercept_tag in dicom_headers):
        # Identity defaults when only one of slope/intercept is present.
        rescale_slope = 1
        rescale_intercept = 0
        if 'RescaleSlope' in dicom_headers:
            rescale_slope = dicom_headers.RescaleSlope
        if 'RescaleIntercept' in dicom_headers:
            rescale_intercept = dicom_headers.RescaleIntercept
        # NOTE(review): reading the private (0x2005,0x100E/0x100D) values was
        # disabled upstream because it can fail on unknown private fields;
        # only the standard RescaleSlope/RescaleIntercept values are applied.
        return do_scaling(data, rescale_slope, rescale_intercept)
    else:
        return data
|
def get_archive(self, container, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
    """Retrieve a file or folder from a container in the form of a tar
    archive.

    Args:
        container (str): The container where the file is located
        path (str): Path to the file or folder to retrieve
        chunk_size (int): The number of bytes returned by each iteration
            of the generator. If ``None``, data will be streamed as it is
            received. Default: 2 MB

    Returns:
        (tuple): First element is a raw tar data stream. Second element is
            a dict containing ``stat`` information on the specified ``path``.

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.

    Example:
        >>> c = docker.APIClient()
        >>> f = open('./sh_bin.tar', 'wb')
        >>> bits, stat = c.get_archive(container, '/bin/sh')
        >>> print(stat)
        {'name': 'sh', 'size': 1075464, 'mode': 493,
         'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''}
        >>> for chunk in bits:
        ...     f.write(chunk)
        >>> f.close()
    """
    params = {'path': path}
    url = self._url('/containers/{0}/archive', container)
    res = self._get(url, params=params, stream=True)
    self._raise_for_status(res)
    # Stat info for the path travels in a response header; decode it via
    # utils.decode_json_header when the daemon supplied it.
    encoded_stat = res.headers.get('x-docker-container-path-stat')
    return (self._stream_raw_result(res, chunk_size, False), utils.decode_json_header(encoded_stat) if encoded_stat else None)
|
def boxplot(df, category, quantity, category_type="N", title=None, xlabel=None, ylabel=None):
    """Plot a simple boxplot using Altair.

    Parameters
    ----------
    df : `pandas.DataFrame`
        Contains columns matching 'category' and 'quantity' labels, at a minimum.
    category : `string`
        The name of the column in df used to group values on the horizontal axis.
    quantity : `string`
        The name of the column in df of values to plot on the vertical axis. Must be numerical.
    category_type : {'N', 'O', 'T'}, optional
        Nominal, ordinal, or time values can be used as categories. Quantitative (Q) values look weird.
    title : `string`, optional
        Text label at the top of the plot.
    xlabel : `string`, optional
        Text label along the horizontal axis.
    ylabel : `string`, optional
        Text label along the vertical axis.

    Returns
    -------
    `altair.Chart`
    """
    # must be one of Nominal, Ordinal, Time per altair
    if category_type not in ("N", "O", "T"):
        raise OneCodexException("If specifying category_type, must be N, O, or T")
    # adapted from https://altair-viz.github.io/gallery/boxplot_max_min.html
    # Aggregate shorthand for the box (quartiles) and whiskers (min/max).
    lower_box = "q1({}):Q".format(quantity)
    lower_whisker = "min({}):Q".format(quantity)
    upper_box = "q3({}):Q".format(quantity)
    upper_whisker = "max({}):Q".format(quantity)
    # Time categories are displayed as hours:minutes on the x axis.
    if category_type == "T":
        x_format = "hoursminutes({}):{}".format(category, category_type)
    else:
        x_format = "{}:{}".format(category, category_type)
    # The boxplot is layered from four marks: rule (lower whisker), bar (box),
    # rule (upper whisker), and tick (median).
    lower_plot = (alt.Chart(df).mark_rule().encode(y=alt.Y(lower_whisker, axis=alt.Axis(title=ylabel)), y2=lower_box, x=x_format))
    middle_plot = alt.Chart(df).mark_bar(size=35).encode(y=lower_box, y2=upper_box, x=x_format)
    upper_plot = alt.Chart(df).mark_rule().encode(y=upper_whisker, y2=upper_box, x=x_format)
    middle_tick = (alt.Chart(df).mark_tick(color="black", size=35).encode(y="median({}):Q".format(quantity), x=alt.X(x_format, axis=alt.Axis(title=xlabel), scale=alt.Scale(rangeStep=45)), tooltip="median({}):Q".format(quantity), ))
    chart = lower_plot + middle_plot + upper_plot + middle_tick
    if title:
        chart = chart.properties(title=title)
    return chart
|
def crop(self, start=None, end=None, copy=False):
    """Crop this series to the given x-axis extent.

    Parameters
    ----------
    start : `float`, optional
        lower limit of x-axis to crop to, defaults to
        current `~Series.x0`
    end : `float`, optional
        upper limit of x-axis to crop to, defaults to current series end
    copy : `bool`, optional, default: `False`
        copy the input data to fresh memory, otherwise return a view

    Returns
    -------
    series : `Series`
        A new series with a sub-set of the input data

    Notes
    -----
    If either ``start`` or ``end`` are outside of the original
    `Series` span, warnings will be printed and the limits will
    be restricted to the :attr:`~Series.xspan`
    """
    x0, x1 = self.xspan
    xtype = type(x0)
    # Strip units from Quantity limits so the comparisons below work on
    # plain numbers in this series' x-unit.
    if isinstance(start, Quantity):
        start = start.to(self.xunit).value
    if isinstance(end, Quantity):
        end = end.to(self.xunit).value
    # pin early starts to time-series start
    if start == x0:
        start = None
    elif start is not None and xtype(start) < x0:
        warn('%s.crop given start smaller than current start, ' 'crop will begin when the Series actually starts.' % type(self).__name__)
        start = None
    # pin late ends to time-series end
    if end == x1:
        end = None
    if end is not None and xtype(end) > x1:
        warn('%s.crop given end larger than current end, ' 'crop will end when the Series actually ends.' % type(self).__name__)
        end = None
    # find start index: convert the x-offset to a sample count (floor
    # division by the sample step dx).
    if start is None:
        idx0 = None
    else:
        idx0 = int((xtype(start) - x0) // self.dx.value)
    # find end index
    if end is None:
        idx1 = None
    else:
        idx1 = int((xtype(end) - x0) // self.dx.value)
        # An end index at or past the last sample means "no upper bound".
        if idx1 >= self.size:
            idx1 = None
    # crop via slicing; `copy` controls view vs fresh memory.
    if copy:
        return self[idx0:idx1].copy()
    return self[idx0:idx1]
|
def set_pos_info_recurse(self, node, start, finish, parent=None):
    """Set positions under node.

    Records the (start, finish) extent on `node` and every descendant,
    also linking each child to `parent` (defaulting to `node` itself at
    the top of the recursion).
    """
    self.set_pos_info(node, start, finish)
    if parent is None:
        parent = node
    for n in node:
        n.parent = parent
        if hasattr(n, 'offset'):
            # Offset-bearing nodes are positioned through the helper.
            self.set_pos_info(n, start, finish)
        else:
            # Other nodes get their extent recorded directly and are
            # recursed into so their children are positioned too.
            n.start = start
            n.finish = finish
            self.set_pos_info_recurse(n, start, finish, parent)
    return
|
def write_log_file(namespace, document):
    """Append one JSON-encoded line for `document` to the namespace's daily log.

    Arguments:
        namespace {str} -- namespace of document
        document {dict} -- document to write to the logs
    """
    stamp = asctime(gmtime(document[TS]))
    path = "{}{}.{}.log".format(LOG_DIR, namespace, DAY_STRING)
    with open(path, "a") as handle:
        entry = dumps({
            "datetime": stamp.upper(),
            "namespace": namespace,
            "log": document[LOG_KEY],
        })
        handle.write("{}\n".format(entry))
|
def error(self, msg):
    """Log an error-level message through the action/filter/processing pipeline."""
    level = 'error'
    # Actions fire on the raw message; filters may transform it before the
    # processing and delivery stages see it.
    self._execActions(level, msg)
    filtered = self._execFilters(level, msg)
    self._processMsg(level, filtered)
    self._sendMsg(level, filtered)
|
def _getDirection(coord1, coord2):
    """Return the direction of the line formed by the (x, y)
    points in `coord1` and `coord2`.

    Returns one of the eight direction constants (UP, DOWN, LEFT, RIGHT,
    UPRIGHT, DOWNRIGHT, UPLEFT, DOWNLEFT), or None when both points are
    identical.
    """
    x1, y1 = coord1
    x2, y2 = coord2
    # Two coordinates are the same: no direction.
    if x1 == x2 and y1 == y2:
        return None
    # Axis-aligned cases. Note the y axis grows downward here: y1 > y2
    # means coord2 lies above coord1, so the line points UP.
    elif x1 == x2 and y1 > y2:
        return UP
    elif x1 == x2 and y1 < y2:
        return DOWN
    elif x1 > x2 and y1 == y2:
        return LEFT
    elif x1 < x2 and y1 == y2:
        return RIGHT
    slope = float(y2 - y1) / float(x2 - x1)
    # Figure out which quadrant the line is going in, and then
    # determine the closest direction by calculating the slope.
    # 0.4142 ~= tan(22.5 deg) and 2.4142 ~= tan(67.5 deg), so each of the
    # eight compass directions covers a 45-degree wedge.
    if x2 > x1 and y2 < y1:  # up right quadrant
        if slope > -0.4142:
            return RIGHT  # slope is between 0 and 22.5 degrees
        elif slope < -2.4142:
            return UP  # slope is between 67.5 and 90 degrees
        else:
            return UPRIGHT  # slope is between 22.5 and 67.5 degrees
    elif x2 > x1 and y2 > y1:  # down right quadrant
        if slope > 2.4142:
            return DOWN
        elif slope < 0.4142:
            return RIGHT
        else:
            return DOWNRIGHT
    elif x2 < x1 and y2 < y1:  # up left quadrant
        if slope < 0.4142:
            return LEFT
        elif slope > 2.4142:
            return UP
        else:
            return UPLEFT
    elif x2 < x1 and y2 > y1:  # down left quadrant
        if slope < -2.4142:
            return DOWN
        elif slope > -0.4142:
            return LEFT
        else:
            return DOWNLEFT
|
def open_read(self, headers=None, query_args='', override_num_retries=None, response_headers=None):
    """Open this key for reading.

    :type headers: dict
    :param headers: Headers to pass in the web request

    :type query_args: string
    :param query_args: Arguments to pass in the query string (ie, 'torrent')

    :type override_num_retries: int
    :param override_num_retries: If not None will override configured
        num_retries parameter for underlying GET.

    :type response_headers: dict
    :param response_headers: A dictionary containing HTTP headers/values
        that will override any headers associated with
        the stored object in the response.
        See http://goo.gl/EWOPb for details.
    """
    # Only open once; subsequent calls reuse the already-fetched response.
    # (Fixed "== None" to the idiomatic identity check "is None".)
    if self.resp is None:
        self.mode = 'r'
        provider = self.bucket.connection.provider
        self.resp = self.bucket.connection.make_request(
            'GET', self.bucket.name, self.name, headers,
            query_args=query_args,
            override_num_retries=override_num_retries)
        # NOTE(review): the lower bound 199 looks like it was meant to be
        # "< 200" (non-2xx); kept as-is to preserve existing behaviour.
        if self.resp.status < 199 or self.resp.status > 299:
            body = self.resp.read()
            raise provider.storage_response_error(
                self.resp.status, self.resp.reason, body)
        response_headers = self.resp.msg
        self.metadata = boto.utils.get_aws_metadata(response_headers, provider)
        for name, value in response_headers.items():
            # To get correct size for Range GETs, use Content-Range
            # header if one was returned. If not, use Content-Length
            # header.
            lname = name.lower()
            if (lname == 'content-length' and
                    'Content-Range' not in response_headers):
                self.size = int(value)
            elif lname == 'content-range':
                end_range = re.sub('.*/(.*)', '\\1', value)
                self.size = int(end_range)
            elif lname == 'etag':
                self.etag = value
            elif lname == 'content-type':
                self.content_type = value
            elif lname == 'content-encoding':
                self.content_encoding = value
            elif lname == 'last-modified':
                self.last_modified = value
            elif lname == 'cache-control':
                self.cache_control = value
        self.handle_version_headers(self.resp)
        self.handle_encryption_headers(self.resp)
|
def lock(self):
    """Lock the device, mirroring the new state locally on success."""
    locked = self.set_status(CONST.STATUS_LOCKCLOSED_INT)
    if locked:
        self._json_state['status'] = CONST.STATUS_LOCKCLOSED
    return locked
|
def stat(self, path):
    """Safely get the znode's Stat, returning None on missing node or no auth."""
    try:
        return self.exists(str(path))
    except (NoNodeError, NoAuthError):
        return None
|
def get(self, sid):
    """Constructs a ExecutionStepContext

    :param sid: Step Sid.

    :returns: twilio.rest.studio.v1.flow.execution.execution_step.ExecutionStepContext
    :rtype: twilio.rest.studio.v1.flow.execution.execution_step.ExecutionStepContext
    """
    # flow_sid and execution_sid come from this list's resolved solution;
    # only the step sid is supplied by the caller.
    return ExecutionStepContext(self._version, flow_sid=self._solution['flow_sid'], execution_sid=self._solution['execution_sid'], sid=sid, )
|
def execute(self, run):
    """This function executes the tool with a sourcefile with options.
    It also calls functions for output before and after the run.
    """
    self.output_handler.output_before_run(run)
    benchmark = self.benchmark
    memlimit = benchmark.rlimits.get(MEMLIMIT)
    args = run.cmdline()
    logging.debug('Command line of run is %s', args)
    # Launch the run with all configured resource limits (time, memory,
    # cores, file-count/size) and capture its result record.
    run_result = self.run_executor.execute_run(args, output_filename=run.log_file, output_dir=run.result_files_folder, result_files_patterns=benchmark.result_files_patterns, hardtimelimit=benchmark.rlimits.get(TIMELIMIT), softtimelimit=benchmark.rlimits.get(SOFTTIMELIMIT), walltimelimit=benchmark.rlimits.get(WALLTIMELIMIT), cores=self.my_cpus, memory_nodes=self.my_memory_nodes, memlimit=memlimit, environments=benchmark.environment(), workingDir=benchmark.working_directory(), maxLogfileSize=benchmark.config.maxLogfileSize, files_count_limit=benchmark.config.filesCountLimit, files_size_limit=benchmark.config.filesSizeLimit)
    if self.run_executor.PROCESS_KILLED:
        # If the run was interrupted, we ignore the result and cleanup.
        try:
            if benchmark.config.debug:
                # In debug mode keep the log, but mark it as killed.
                os.rename(run.log_file, run.log_file + ".killed")
            else:
                os.remove(run.log_file)
        except OSError:
            pass
        return 1
    # Record which resources this run actually used.
    if self.my_cpus:
        run_result['cpuCores'] = self.my_cpus
    if self.my_memory_nodes:
        run_result['memoryNodes'] = self.my_memory_nodes
    run.set_result(run_result)
    self.output_handler.output_after_run(run)
|
def debug_layer(self, layer, check_fields=True, add_to_datastore=None):
    """Write the layer produced to the datastore if debug mode is on.

    :param layer: The QGIS layer to check and save.
    :type layer: QgsMapLayer

    :param check_fields: Boolean to check or not inasafe_fields.
        By default, it's true.
    :type check_fields: bool

    :param add_to_datastore: Boolean if we need to store the layer. This
        parameter will overwrite the debug mode behaviour. Default to None,
        we usually let debug mode choose for us.
    :type add_to_datastore: bool

    :return: The name of the layer added in the datastore.
    :rtype: basestring
    """
    # This one checks the memory layer.
    check_layer(layer, has_geometry=None)
    if isinstance(layer, QgsVectorLayer) and check_fields:
        is_geojson = '.geojson' in layer.source().lower()
        if layer.featureCount() == 0 and is_geojson:
            # https://issues.qgis.org/issues/18370
            # We can't check a geojson file with 0 feature.
            pass
        else:
            check_inasafe_fields(layer)
    # Be careful, add_to_datastore can be None, True or False.
    # None means we let debug_mode to choose for us.
    # If add_to_datastore is not None, we do not care about debug_mode.
    if isinstance(add_to_datastore, bool) and add_to_datastore:
        save_layer = True
    elif isinstance(add_to_datastore, bool) and not add_to_datastore:
        save_layer = False
    elif self.debug_mode:
        save_layer = True
    else:
        save_layer = False
    if save_layer:
        result, name = self.datastore.add_layer(layer, layer.keywords['title'])
        if not result:
            raise Exception('Something went wrong with the datastore : {error_message}'.format(error_message=name))
        if self.debug_mode:
            # This one checks the GeoJSON file. We noticed some difference
            # between checking a memory layer and a file based layer.
            check_layer(self.datastore.layer(name))
        # NOTE(review): `name` only exists when the layer was saved, so the
        # return sits inside this branch; the function yields None otherwise.
        return name
|
def has_cjk(self):
    """Checks if the word of the chunk contains CJK characters.

    This is using unicode codepoint ranges from
    https://github.com/nltk/nltk/blob/develop/nltk/tokenize/util.py#L149

    Returns:
        bool: True if the chunk has any CJK character.
    """
    cjk_ranges = (
        (4352, 4607), (11904, 42191), (43072, 43135), (44032, 55215),
        (63744, 64255), (65072, 65103), (65381, 65500), (131072, 196607),
    )
    return any(
        lo <= ord(ch) <= hi
        for ch in self.word
        for lo, hi in cjk_ranges
    )
|
def get_parents(self):
    """Returns the parents of the variables present in the network.

    Examples
    --------
    >>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
    >>> reader.get_parents()
    {'bowel-problem': [],
     'dog-out': ['family-out', 'bowel-problem'],
     'family-out': [],
     'hear-bark': ['dog-out'],
     'light-on': ['family-out']}
    """
    parents = {}
    # Each DEFINITION element names a child variable (FOR) and zero or more
    # parent variables (GIVEN).
    for definition in self.network.findall('DEFINITION'):
        child = definition.find('FOR').text
        parents[child] = [given.text for given in definition.findall('GIVEN')]
    return parents
|
def fillNoneValues(column):
    """Fill all NaN/NaT values of a column with an empty string.

    Args:
        column (pandas.Series): A Series object with all rows.

    Returns:
        column: Series with filled NaN values.
    """
    # Only object-dtype (string-like) columns are filled; numeric columns
    # keep their NaN values. The fill mutates the caller's Series in place,
    # and the same Series is returned for convenience.
    if column.dtype == object:
        column.fillna('', inplace=True)
    return column
|
def parse_name(parts, main_names, common_names, debug=0):
    """Parse all name(s) from a Backpage ad.

    parts -> The backpage ad's posting_body, separated into substrings
    main_names -> "Regular" names (jessica, gabriel, etc.) that can be
        trusted as names simply by their existence
    common_names -> Names such as "pleasure" or "sexy" that should only be
        parsed if surrounded by an "intro"

    main_names and common_names can both be either sets or dictionaries.
    Current version uses dictionaries.
    """
    # Strip incall/outcall markers up front so they never match as names.
    lowercase_parts = [re.sub(r'(in|out)call', '', p.lower()) for p in parts]
    start = time.time()
    # Intros to common names. Note 'Intro' (capital I) is the marker that the
    # "i'm" substitution below injects into the otherwise-lowercased text.
    intros = {
        'pre': ['my name is', 'i am', 'call me', 'call', 'text', 'my names',
                'my name', 'known as', 'go by', 'Intro', 'ask for',
                'call for', 'ask', 'this is', 'one and only',
                'prevphonespothti', 'called'],
        'post': ['is back', 'is here', 'in town', 'prevphonespothti',
                 'is ready', 'is available'],
    }
    spanish_intros = ['hola soy', 'me llamo', 'mi nombre es', 'yo soy',
                      'pregunta por', 'encantada de conocerlo', 'hola papi']
    intros['pre'].extend(spanish_intros)
    # Regex intros to common names
    rgx_intros = {
        'pre': [r'\b(?:it\'s)\b', r'\b(?:it s)\b', r'\b(?:its)\b',
                r'\b(?:soy)\b', r'\b(?:es)\b', r'\b(?:hola)\b',
                r'\b(?:y?o?ur girl)\b', r'\b(?:i\'m)\b', r'\b(?:im)\b',
                r'\b(?:y?o?ur favorite girl)\b',
                r'\b(?:y?o?ur most favorite girl)\b',
                r'\bmy ?fr(?:i|e)(?:i|e)nd\b', r'\bm(?:s|z)\.',
                r'\bmi(?:s{1,2}|z{1,2})'],
        'post': [r'\b(?:here)\b', r'\b(?:(?:i|\')s (?:the|my) name)\b'],
    }
    # These words shouldn't follow a common name matched from an intro
    false_positives = set(['complexion', 'skin', 'hair', 'locks', 'eyes',
                           'st', 'ave', 'street', 'avenue', 'blvd',
                           'boulevard', 'highway', 'circle', 'hwy', 'road',
                           'rd'])
    vowels_with_y = set(list('aeiouy'))
    uniques = set([])
    for p in lowercase_parts:
        part = p.lower()
        part = re.sub(r"(^| )i ?'? ?m ", " Intro ", part).strip()
        part = part.replace('<br>', ' ').replace('&', ' and ')
        part = re.sub(r'\.+', ' ', part)
        # Collapse stretched letters ("xxx", "yyyy") that ads use for emphasis.
        part = re.sub(r'x+', 'x', part)
        part = re.sub(r'y+', 'y', part)
        # Retain 'part' to be used for separating comma-separated names
        part = re.sub(r',+', ' ', part)
        part = re.sub(r' +', ' ', part)
        builder = []
        for pt in part.split():
            if len(pt) > 2:
                lastc = pt[len(pt) - 1]
                # Convert names that have repeated last letters, unless the
                # last letter is "e" or the word ends in vowel + doubled
                # consonant.
                if lastc == pt[len(pt) - 2] and not (lastc == 'e' or (pt[len(pt) - 3] in vowels_with_y and lastc not in vowels_with_y)):
                    builder.append(pt[:len(pt) - 1])
                else:
                    builder.append(pt)
            else:
                builder.append(pt)
        part = ' '.join(builder)
        # Check if the part is entirely just a common word
        ageless_title = re.sub(r' - \d\d', '', part.lower())
        ageless_title = re.sub(r'\W+', '', ageless_title)
        if ageless_title in common_names or ageless_title in main_names:
            uniques.add(ageless_title)
            continue
        # Find common names that come immediately before or after a
        # "both-side intro"
        for k in intros:
            for intro in intros[k]:
                if intro in part:
                    pts = part.split(intro)
                    for i in range(1, len(pts)):
                        if k == 'post':
                            # Check left side of intro
                            ptl = re.sub(r'\W', ' ', pts[i - 1])
                            ptl = re.sub(r' +', ' ', ptl)
                            tokenized = ptl.split()
                            if tokenized and tokenized[len(tokenized) - 1] and tokenized[len(tokenized) - 1] in common_names:
                                uniques.add(tokenized[len(tokenized) - 1])
                                break
                        else:
                            # Check right side of intro
                            ptr = re.sub(r'\W', ' ', pts[i])
                            ptr = re.sub(r' +', ' ', ptr)
                            tokenized = ptr.split()
                            if tokenized and tokenized[0] in common_names:
                                if not (len(tokenized) > 1 and tokenized[1] in false_positives or (len(tokenized) > 2 and tokenized[2] in false_positives)):
                                    # Next 2 words are not false positives
                                    uniques.add(tokenized[0])
                                    break
        # Check intros that include regexes
        for k in rgx_intros:
            for intro in rgx_intros[k]:
                matches = list(re.findall(intro, part))
                for match in matches:
                    pts = part.split(match)
                    for i in range(1, len(pts)):
                        if k == 'post':
                            # Check left side of intro
                            ptl = re.sub(r'\W', ' ', pts[i - 1])
                            ptl = re.sub(r' +', ' ', ptl)
                            tokenized = ptl.split()
                            if tokenized and tokenized[len(tokenized) - 1] and tokenized[len(tokenized) - 1] in common_names:
                                uniques.add(tokenized[len(tokenized) - 1])
                                break
                        else:
                            # Check right side of intro
                            ptr = re.sub(r'\W', ' ', pts[i])
                            ptr = re.sub(r' +', ' ', ptr)
                            tokenized = ptr.split()
                            if tokenized and tokenized[0] in common_names:
                                if not (len(tokenized) > 1 and tokenized[1] in false_positives or (len(tokenized) > 2 and tokenized[2] in false_positives)):
                                    # Next 2 words are not false positives
                                    uniques.add(tokenized[0])
                                    break
        # Find regular names
        tokens = list(re.split(r'\W+', part))
        for i in range(len(tokens)):
            if not tokens[i]:
                continue
            curr = tokens[i]
            # Check if current token has an 's' at the end
            # (ex: "brittanys beautiful body")
            if curr not in main_names and curr[len(curr) - 1] == 's' and curr[:-1] in main_names:
                curr = curr[:-1]
            if curr in main_names:
                # Check if name is a two-part name
                if i > 0 and (''.join([tokens[i - 1], curr]) in main_names or ''.join([tokens[i - 1], curr]) in common_names):
                    # Prev token was a prefix to current
                    uniques.add(' '.join([tokens[i - 1], curr]))
                    uniques.discard(tokens[i - 1])
                elif (i < len(tokens) - 1 and tokens[i + 1] and (''.join([tokens[i], tokens[i + 1]]) in main_names or ''.join([tokens[i], tokens[i + 1]]) in common_names)):
                    # Current token has a suffix
                    uniques.add(' '.join([tokens[i], tokens[i + 1]]))
                elif (i < len(tokens) - 1 and tokens[i + 1] and tokens[i + 1][len(tokens[i + 1]) - 1] == 's' and (''.join([tokens[i], tokens[i + 1][:-1]]) in main_names or ''.join([tokens[i], tokens[i + 1][:-1]]) in common_names)):
                    # Current token has a suffix with plural ending ('s')
                    uniques.add(' '.join([tokens[i], tokens[i + 1][:-1]]))
                else:
                    # Only single-word name
                    uniques.add(curr)
        # Find common words that are part of "pairing" phrases, paired with
        # names that we found already
        pairings = set(['and', 'plus', 'with'])
        for i in range(len(tokens)):
            if tokens[i] not in uniques and tokens[i] in common_names:
                if i > 1 and tokens[i - 2] in uniques and tokens[i - 1] in pairings:
                    # ex: "jessica and diamond"
                    uniques.add(tokens[i])
                elif i < len(tokens) - 2 and tokens[i + 2] in uniques and tokens[i + 1] in pairings:
                    # ex: "diamond and jessica"
                    uniques.add(tokens[i])
        # Odd cases
        if ('mary' in uniques or 'jane' in uniques or 'mary jane' in uniques) and re.search(r'mary\W+jane', part):
            uniques.discard('jane')
            uniques.discard('mary')
            uniques.discard('mary jane')
        if 'crystal' in uniques and re.search(r'crystal ?(blue|spa|massage|parlor|city|stone)', part):
            uniques.discard('crystal')
    # Remove names that are substrings of larger names
    names_final = set([])
    if isinstance(main_names, set):
        # Name datasets are raw sets of names
        for match in uniques:
            if not any(match in name for name in [v for v in uniques if v != match]) and match:
                names_final.add(match.strip())
    else:
        # Name datasets are misspelled names mapped to properly spelled names
        for match in uniques:
            nosp_match = match.replace(' ', '')
            if not any(nosp_match in name for name in [v for v in uniques if v != nosp_match]) and nosp_match:
                # add the parsed name, not the converted one
                # (ex: don't change "mickey" to "mikey")
                names_final.add(nosp_match)
    if debug == 1:
        # Parenthesized single-argument call: valid under both Python 2
        # (where the original bare `print` statement lived) and Python 3
        # (where the statement form is a SyntaxError).
        print('parse_name time taken: {} seconds'.format(time.time() - start))
    return list(names_final)
|
def sample_size_necessary_under_cph(power, ratio_of_participants, p_exp, p_con, postulated_hazard_ratio, alpha=0.05):
    """Compute the sample sizes needed to compare two groups under a Cox
    Proportional Hazard model.

    Parameters
    ----------
    power : float
        power to detect the magnitude of the hazard ratio as small as that
        specified by postulated_hazard_ratio.
    ratio_of_participants : float
        ratio of participants in experimental group over control group.
    p_exp : float
        probability of failure in experimental group over period of study.
    p_con : float
        probability of failure in control group over period of study.
    postulated_hazard_ratio : float
        the postulated hazard ratio.
    alpha : float, optional (default=0.05)
        type I error rate.

    Returns
    -------
    n_exp : int
        sample size needed for the experimental group to achieve desired power.
    n_con : int
        sample size needed for the control group to achieve desired power.

    Examples
    --------
    >>> desired_power = 0.8
    >>> ratio_of_participants = 1.
    >>> p_exp = 0.25
    >>> p_con = 0.35
    >>> postulated_hazard_ratio = 0.7
    >>> n_exp, n_con = sample_size_necessary_under_cph(desired_power, ratio_of_participants, p_exp, p_con, postulated_hazard_ratio)
    >>> # (421, 421)

    References
    ----------
    https://cran.r-project.org/web/packages/powerSurvEpi/powerSurvEpi.pdf

    See Also
    --------
    power_under_cph
    """
    z_alpha = stats.norm.ppf(1.0 - alpha / 2.0)
    z_power = stats.norm.ppf(power)
    # Required number of events (powerSurvEpi formulation).
    hazard_term = ((ratio_of_participants * postulated_hazard_ratio + 1.0)
                   / (postulated_hazard_ratio - 1.0)) ** 2
    m = hazard_term * (z_alpha + z_power) ** 2 / ratio_of_participants
    # Convert events into per-group sample sizes via the failure probabilities.
    total_failure_prob = ratio_of_participants * p_exp + p_con
    n_exp = m * ratio_of_participants / total_failure_prob
    n_con = m / total_failure_prob
    return int(np.ceil(n_exp)), int(np.ceil(n_con))
|
def is_required_version(required_version='0.0.0'):
    '''Return True if the device's cached Palo Alto software version is at
    least ``required_version``.

    Because different versions of Palo Alto support different command sets,
    callers use this to gate version-specific commands.
    '''
    if 'sw-version' not in DETAILS['grains_cache']:
        # Without the cached sw-version we cannot verify the requirement.
        return False
    current_version = DETAILS['grains_cache']['sw-version']

    required_parts = required_version.split(".")
    current_parts = current_version.split(".")
    try:
        # Compare major, minor, patch in order; the first difference decides.
        for position in range(3):
            current_component = int(current_parts[position])
            required_component = int(required_parts[position])
            if current_component > required_component:
                return True
            if current_component < required_component:
                return False
        # All three components matched exactly.
        return True
    except Exception:
        # Malformed or truncated version strings do not satisfy the
        # requirement (same behavior as the original broad except).
        return False
|
def financials ( self , security ) :
    """Fetch up to four of the latest annual/quarterly financial reports for
    ``security`` from Google Finance.

    Google Finance provides both annual and quarterly financials; the page's
    interim-income div and its sibling divs hold the report sections.
    Example page: http://www.google.com/finance?q=TSE:CVG&fstype=ii

    :param security: ticker symbol understood by Google Finance.
    :return: dict mapping report keys to time/value data.
    :raises UfException: STOCK_SYMBOL_ERROR for an unknown symbol, or
        UNKNOWN_ERROR for any other failure.
    """
    try :
        url = 'http://www.google.com/finance?q=%s&fstype=ii' % security
        try :
            page = self . _request ( url ) . read ( )
        except UfException as ufExcep : # an unknown symbol yields an HTTP 400
            # NOTE(review): ``ufExcep . getCode`` is not called -- if getCode
            # is a method (not a property), this compares against the bound
            # method object and is always False; likely missing ``()``.
            if Errors . NETWORK_400_ERROR == ufExcep . getCode :
                raise UfException ( Errors . STOCK_SYMBOL_ERROR , "Can find data for stock %s, security error?" % security )
            raise ufExcep
        bPage = BeautifulSoup ( page )
        # The interim income-statement div anchors the scrape; its siblings
        # hold the remaining report sections.
        target = bPage . find ( id = 'incinterimdiv' )
        keyTimeValue = { }
        # ugly do...while: parse the anchor div plus up to five siblings
        i = 0
        while True :
            self . _parseTarget ( target , keyTimeValue )
            if i < 5 :
                i += 1
                target = target . nextSibling
                # ugly BeautifulSoup detail: skip the bare-newline text nodes
                # inserted between element siblings
                if '\n' == target :
                    target = target . nextSibling
            else :
                break
        return keyTimeValue
    except BaseException : # wrap anything unexpected in a UfException
        raise UfException ( Errors . UNKNOWN_ERROR , "Unknown Error in GoogleFinance.getHistoricalPrices %s" % traceback . format_exc ( ) )
|
def bqm_structured(f):
    """Decorator to raise an error if the given bqm does not match the sampler's
    structure.

    Designed to be applied to :meth:`.Sampler.sample`. Expects the wrapped
    function or method to accept a :obj:`.BinaryQuadraticModel` as the second
    input and for the :class:`.Sampler` to also be :class:`.Structured`.
    """
    @wraps(f)
    def wrapper(sampler, bqm, **kwargs):
        try:
            adjacency = sampler.structure.adjacency
        except AttributeError:
            # Distinguish a broken Structured sampler from a plain sampler.
            if isinstance(sampler, Structured):
                raise RuntimeError("something is wrong with the structured sampler")
            else:
                raise TypeError("sampler does not have a structure property")

        # Every variable must exist on the sampler's graph.
        if not all(v in adjacency for v in bqm.linear):  # todo: better error message
            raise BinaryQuadraticModelStructureError("given bqm does not match the sampler's structure")
        # Every interaction must be an edge of the sampler's graph.
        if not all(u in adjacency[v] for u, v in bqm.quadratic):  # todo: better error message
            raise BinaryQuadraticModelStructureError("given bqm does not match the sampler's structure")

        return f(sampler, bqm, **kwargs)

    return wrapper
|
def object_patch_set_data ( self , root , data , ** kwargs ) :
    """Creates a new merkledag object based on an existing one.

    The new object will have the same links as the old object but
    with the provided data instead of the old object's data contents.

    .. code-block:: python

        >>> c.object_patch_set_data(
        ...     'QmNtXbF3AjAk59gQKRgEdVabHcSsiPUnJwHnZKyj2x8Z3k',
        ...     io.BytesIO(b'bla'))
        {'Hash': 'QmSw3k2qkv4ZPsbu9DVEJaTMszAQWNgM1FTFYpfZeNQWrd'}

    Parameters
    ----------
    root : str
        IPFS hash of the object to modify
    data : io.RawIOBase
        The new data to store in root

    Returns
    -------
    dict : Hash of new object
    """
    args = ( root , )
    # Stream the replacement data as a chunked multipart upload.
    body , headers = multipart . stream_files ( data , self . chunk_size )
    return self . _client . request ( '/object/patch/set-data' , args , decoder = 'json' , data = body , headers = headers , ** kwargs )
|
def check_auth(args, role=None):
    """Check the user authentication.

    Looks the user up in the DynamoDB people table by email, verifies the
    supplied API key, and optionally checks role membership.

    :param args: mapping expected to contain 'email' and 'api_key'.
    :param role: optional collection of role names allowed to proceed.
    :return: dict with 'success', 'message' and (on success) 'user'.
    """
    table = boto3.resource("dynamodb").Table(os.environ['people'])

    email = args.get('email', None)
    api_key = args.get('api_key', None)
    if not (email and api_key):
        return {'success': False,
                'message': "Invalid request: `email` and `api_key` are required"}

    record = table.get_item(Key={'email': email})
    if 'Item' not in record:
        return {'success': False, 'message': 'User does not exist.'}
    user = record['Item']

    if user['api_key'] != api_key:
        return {'success': False, 'message': 'API key was invalid.'}

    if role and user['role'] not in role:
        return {'success': False,
                'message': 'User is not authorized to make this change.'}

    return {'success': True, 'message': None, 'user': user}
|
def _notify_media_transport_available ( self , path , transport ) :
    """Called by the endpoint when a new media transport is
    available.

    :param path: D-Bus object path of the device providing the transport.
    :param transport: the newly available media transport; used as the
        signal-match argument below.
    """
    # Wrap the device path in an audio-sink endpoint and snapshot its state.
    self . sink = BTAudioSink ( dev_path = path )
    self . state = self . sink . State
    # NOTE(review): the signal constant comes from BTAudioSource while the
    # receiver is a BTAudioSink -- presumably both expose the same
    # property-changed signal name; confirm.
    self . sink . add_signal_receiver ( self . _property_change_event_handler , BTAudioSource . SIGNAL_PROPERTY_CHANGED , # noqa
                                    transport )
|
def validate_fillna_kwargs(value, method, validate_scalar_dict_value=True):
    """Validate the keyword arguments to 'fillna'.

    This checks that exactly one of 'value' and 'method' is specified.
    If 'method' is specified, this validates that it's a valid method.

    Parameters
    ----------
    value, method : object
        The 'value' and 'method' keyword arguments for 'fillna'.
    validate_scalar_dict_value : bool, default True
        Whether to validate that 'value' is a scalar or dict. Specifically,
        validate that it is not a list or tuple.

    Returns
    -------
    value, method : object
    """
    from pandas.core.missing import clean_fill_method

    have_value = value is not None
    have_method = method is not None

    if not have_value and not have_method:
        raise ValueError("Must specify a fill 'value' or 'method'.")
    if have_value and have_method:
        raise ValueError("Cannot specify both 'value' and 'method'.")

    if have_method:
        # Normalize method aliases (e.g. 'ffill' -> 'pad').
        method = clean_fill_method(method)
    elif validate_scalar_dict_value and isinstance(value, (list, tuple)):
        raise TypeError('"value" parameter must be a scalar or dict, but '
                        'you passed a "{0}"'.format(type(value).__name__))

    return value, method
|
def unbounded ( self ) :
    """Whether solution is unbounded.

    :return: True if the problem status is UNBOUNDED, False otherwise.
    """
    self . _check_valid ( )
    status = self . _problem . _p . Status
    if ( status == gurobipy . GRB . INF_OR_UNBD and self . _problem . _p . params . DualReductions ) : # Disable dual reductions to obtain a definitve answer
        # Gurobi may report INF_OR_UNBD when dual reductions are enabled;
        # re-optimize once without them to distinguish infeasible from
        # unbounded, and restore the parameter afterwards.
        self . _problem . _p . params . DualReductions = 0
        try :
            self . _problem . _p . optimize ( )
        finally :
            self . _problem . _p . params . DualReductions = 1
        status = self . _problem . _p . Status
    return status == gurobipy . GRB . UNBOUNDED
|
def getCalculationDependencies(self, flat=False, deps=None):
    """Recursively calculate all dependencies of this calculation.

    The return value is a dictionary of dictionaries (of dictionaries...):

        {service_UID1: {},
         service_UID2: {},
         ...}

    Set ``flat=True`` to get a simple list of AnalysisService objects instead.
    """
    if deps is None:
        # Fresh accumulator for the top-level call; recursive calls share it.
        deps = [] if flat is True else {}

    for service in self.getDependentServices():
        calculation = service.getCalculation()
        if calculation:
            # Descend into the service's own calculation first.
            calculation.getCalculationDependencies(flat, deps)
        if flat:
            deps.append(service)
        else:
            deps[service.UID()] = {}
    return deps
|
def _count_files_by_type ( self , path , pattern , ignore = True ) :
"""Count files in the given path , with the given pattern .
If ` ignore = True ` , skip files in the ` _ IGNORE _ FILES ` list .
Returns
num _ files : int"""
|
# Get all files matching the given path and pattern
files = glob ( os . path . join ( path , pattern ) )
# Count the files
files = [ ff for ff in files if os . path . split ( ff ) [ - 1 ] not in self . _IGNORE_FILES or not ignore ]
num_files = len ( files )
return num_files
|
def connect ( self ) :
    """Connects to RabbitMQ.

    Declares a non-durable fanout exchange named 'mease' with transient
    (delivery_mode=1) messages on the connection's default channel.
    """
    self . connection = Connection ( self . broker_url )
    e = Exchange ( 'mease' , type = 'fanout' , durable = False , delivery_mode = 1 )
    # Bind the exchange to the default channel and declare it on the broker.
    self . exchange = e ( self . connection . default_channel )
    self . exchange . declare ( )
|
def get_pdb_id(self):
    '''Return the PDB ID.

    If one was passed in to the constructor, this takes precedence; otherwise
    the header is parsed to try to find an ID. The header does not always
    contain a PDB ID in regular PDB files and appears to always have an ID of
    'XXXX' in biological units, so the constructor override is useful.
    '''
    if self.pdb_id:
        return self.pdb_id

    header_lines = self.parsed_lines["HEADER"]
    assert (len(header_lines) <= 1)
    if not header_lines:
        return None
    # Columns 63-66 of the HEADER record hold the four-character ID code;
    # cache it for subsequent calls.
    self.pdb_id = header_lines[0][62:66]
    return self.pdb_id
|
def predict_magnification ( self , Xnew , kern = None , mean = True , covariance = True , dimensions = None ) :
    """Predict the magnification factor as

        sqrt(det(G))

    for each point N in Xnew, where G is the predicted wishart embedding
    restricted to ``dimensions``.

    :param Xnew: points at which to evaluate the magnification.
    :param kern: kernel to use for the wishart-embedding prediction.
    :param bool mean: whether to include the mean of the wishart embedding.
    :param bool covariance: whether to include the covariance of the wishart embedding.
    :param array-like dimensions: which dimensions of the input space to use [defaults to self.get_most_significant_input_dimensions()[:2]]
    :return: array with one magnification factor per row of Xnew.
    """
    G = self . predict_wishart_embedding ( Xnew , kern , mean , covariance )
    if dimensions is None :
        dimensions = self . get_most_significant_input_dimensions ( ) [ : 2 ]
    # Restrict G to the selected input dimensions (rows and columns).
    G = G [ : , dimensions ] [ : , : , dimensions ]
    from . . util . linalg import jitchol
    mag = np . empty ( Xnew . shape [ 0 ] )
    for n in range ( Xnew . shape [ 0 ] ) :
        try :
            # det(G) via the Cholesky factor: det = prod(diag(L))^2, computed
            # in log space for numerical stability.
            mag [ n ] = np . sqrt ( np . exp ( 2 * np . sum ( np . log ( np . diag ( jitchol ( G [ n , : , : ] ) ) ) ) ) )
        except :
            # jitchol can fail when G[n] is not positive definite; fall back
            # to the direct determinant.
            mag [ n ] = np . sqrt ( np . linalg . det ( G [ n , : , : ] ) )
    return mag
|
def format_csv(self, delim=',', qu='"'):
    """Prepare the data in CSV format.

    :param delim: field delimiter (default ',').
    :param qu: quote character wrapped around each field (default '"').
    :return: one CSV line: the name followed by each data value, every field
        quoted, with a trailing delimiter and newline.
    """
    cells = [self.name]
    if self.data:
        cells.extend(str(item) for item in self.data)
    # Each field is quoted and followed by a delimiter (including the last,
    # matching the historical output format).
    return delim.join(qu + cell + qu for cell in cells) + delim + '\n'
|
def delete_cas(self, key, *, index):
    """Deletes the Key with check-and-set semantics.

    Parameters:
        key (str): Key to delete
        index (ObjectIndex): Index ID

    The Key will only be deleted if its current modify index matches
    the supplied Index.
    """
    operation = {
        "Verb": "delete-cas",
        "Key": key,
        # Accept either an object carrying a ModifyIndex/Index attribute or
        # a raw index value.
        "Index": extract_attr(index, keys=["ModifyIndex", "Index"]),
    }
    self.append(operation)
    return self
|
def create(name, grid, spacing, diameter, depth, volume=0):
    """Creates a labware definition based on a rectangular grid, depth,
    diameter, and spacing.

    Note that this function can only create labware with regularly spaced
    wells in a rectangular format, of equal height, depth, and radius.
    Irregular labware definitions will have to be made in other ways or
    modified using a regular definition as a starting point. Also, upon
    creation a definition always has its lower-left well at (0, 0, 0), such
    that this labware *must* be calibrated before use.

    :param name: the name of the labware to be used with `labware.load`
    :param grid: a 2-tuple of integers representing (<n_columns>, <n_rows>)
    :param spacing: a 2-tuple of floats representing
        (<col_spacing>, <row_spacing>)
    :param diameter: a float representing the internal diameter of each well
    :param depth: a float representing the distance from the top of each well
        to the internal bottom of the same well
    :param volume: [optional] the maximum volume of each well
    :return: the labware object created by this function
    """
    columns, rows = grid
    col_spacing, row_spacing = spacing

    well_properties = {
        'type': 'custom',
        'diameter': diameter,
        'height': depth,
        'total-liquid-volume': volume
    }

    container = Container()
    for col_index in range(columns):
        for row_index in range(rows):
            # Wells are named A1, B1, ... down each column.
            well_name = chr(row_index + ord('A')) + str(1 + col_index)
            # Row 0 is the top row, so flip the y coordinate to put the
            # lower-left well at the origin.
            coordinates = (col_index * col_spacing,
                           (rows - row_index - 1) * row_spacing,
                           0)
            container.add(Well(properties=well_properties),
                          well_name, coordinates)

    database.save_new_container(container, name)
    return database.load_container(name)
|
def remover(self, id_tipo_acesso):
    """Removes access type by its identifier.

    :param id_tipo_acesso: Access type identifier.
    :return: None
    :raise TipoAcessoError: Access type associated with equipment, cannot be removed.
    :raise InvalidParameterError: Protocol value is invalid or none.
    :raise TipoAcessoNaoExisteError: Access type doesn't exist.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    # Validate before building the URL so malformed ids never reach the API.
    if not is_valid_int_param(id_tipo_acesso):
        raise InvalidParameterError(
            u'Access type id is invalid or was not informed.')

    url = 'tipoacesso/%s/' % str(id_tipo_acesso)
    code, xml = self.submit(None, 'DELETE', url)
    return self.response(code, xml)
|
def register_printer ( self , printer_class ) :
    """Instantiate and register a printer plugin.

    :param printer_class: Class inheriting from `AbstractPrinter`.
    """
    # Validate naming/uniqueness constraints shared by all plugin kinds.
    self . _check_common_things ( 'printer' , printer_class , AbstractPrinter , self . _printers )
    instance = printer_class ( self , logger_printer )
    self . _printers . append ( instance )
|
def escape_path ( pth ) :
    """Hex/unicode escapes a path.

    Escapes a path so that it can be represented faithfully in an HDF5
    file without changing directories. This means that leading ``'.'``
    must be escaped. ``'/'`` and null must be escaped too. Backslashes
    are escaped as double backslashes. Other escaped characters are
    replaced with ``'\\xYY'``, ``'\\uYYYY'``, or ``'\\UYYYYY'`` where Y
    are hex digits depending on the unicode numerical value of the
    character. For ``'.'``, both slashes, and null, this will be the
    former (``'\\xYY'``).

    .. versionadded:: 0.2

    Parameters
    ----------
    pth : str or bytes
        The path to escape.

    Returns
    -------
    epth : str
        The escaped path.

    Raises
    ------
    TypeError
        If `pth` is not the right type.

    See Also
    --------
    unescape_path
    """
    # Normalize bytes input to text before escaping.
    if isinstance ( pth , bytes ) :
        pth = pth . decode ( 'utf-8' )
    if sys . hexversion >= 0x03000000 :
        # Python 3: the native str type is already unicode.
        if not isinstance ( pth , str ) :
            raise TypeError ( 'pth must be str or bytes.' )
        # Replace each leading '.' with its hex escape so the path cannot be
        # read as a relative-directory reference.
        match = _find_dots_re . match ( pth )
        if match is None :
            prefix = ''
            s = pth
        else :
            prefix = '\\x2e' * match . end ( )
            s = pth [ match . end ( ) : ]
    else :
        # Python 2: after the bytes decode above, only unicode is acceptable.
        if not isinstance ( pth , unicode ) :
            raise TypeError ( 'pth must be unicode or str.' )
        match = _find_dots_re . match ( pth )
        if match is None :
            prefix = unicode ( '' )
            s = pth
        else :
            prefix = unicode ( '\\x2e' ) * match . end ( )
            s = pth [ match . end ( ) : ]
    # Escape slashes, nulls and backslashes in the remainder of the path.
    return prefix + _find_fslashnull_re . sub ( _replace_fun_escape , s )
|
def get_type_hierarchy(s):
    """Get the sequence of parents from `s` to Statement.

    Parameters
    ----------
    s : a class or instance of a child of Statement
        For example the statement `Phosphorylation(MEK(), ERK())` or just the
        class `Phosphorylation`.

    Returns
    -------
    parent_list : list[types]
        A list of the types leading up to Statement.

    Examples
    --------
    >> s = Phosphorylation(MAPK1(), Elk1())
    >> get_type_hierarchy(s)
    [Phosphorylation, AddModification, Modification, Statement]
    >> get_type_hierarchy(AddModification)
    [AddModification, Modification, Statement]
    """
    # Accept either a class or an instance.
    cls = s if isinstance(s, type) else type(s)
    hierarchy = [cls]
    for base in cls.__bases__:
        if base is Statement:
            # Stop the walk at the Statement root.
            hierarchy.append(base)
        else:
            hierarchy.extend(get_type_hierarchy(base))
    return hierarchy
|
def kronecker_decomposition ( gate : Gate ) -> Circuit :
    """Decompose a 2-qubit unitary composed of two 1-qubit local gates.

    Uses the "Nearest Kronecker Product" algorithm. Will give erratic
    results if the gate is not the direct product of two 1-qubit gates.

    :param gate: a 2-qubit Gate assumed to equal A (x) B for 1-qubit A, B.
    :return: a Circuit of 1-qubit gates (ZYZ-decomposed) equivalent to gate.
    :raises ValueError: if gate is not 2-qubit or cannot be decomposed.
    """
    # An alternative approach would be to take partial traces, but
    # this approach appears to be more robust.
    if gate . qubit_nb != 2 :
        raise ValueError ( 'Expected 2-qubit gate' )
    U = asarray ( gate . asoperator ( ) )
    rank = 2 ** gate . qubit_nb
    # Normalize to det(U) = 1 to remove the global phase.
    U /= np . linalg . det ( U ) ** ( 1 / rank )
    # Rearrange the 4x4 unitary so a Kronecker product A (x) B becomes the
    # rank-1 matrix vec(A) vec(B)^T; the nearest Kronecker product is then
    # the leading singular triplet of R.
    R = np . stack ( [ U [ 0 : 2 , 0 : 2 ] . reshape ( 4 ) , U [ 0 : 2 , 2 : 4 ] . reshape ( 4 ) , U [ 2 : 4 , 0 : 2 ] . reshape ( 4 ) , U [ 2 : 4 , 2 : 4 ] . reshape ( 4 ) ] )
    u , s , vh = np . linalg . svd ( R )
    v = vh . transpose ( )
    # Split the dominant singular value evenly between the two factors.
    A = ( np . sqrt ( s [ 0 ] ) * u [ : , 0 ] ) . reshape ( 2 , 2 )
    B = ( np . sqrt ( s [ 0 ] ) * v [ : , 0 ] ) . reshape ( 2 , 2 )
    q0 , q1 = gate . qubits
    g0 = Gate ( A , qubits = [ q0 ] )
    g1 = Gate ( B , qubits = [ q1 ] )
    if not gates_close ( gate , Circuit ( [ g0 , g1 ] ) . asgate ( ) ) :
        raise ValueError ( "Gate cannot be decomposed into two 1-qubit gates" )
    circ = Circuit ( )
    circ += zyz_decomposition ( g0 )
    circ += zyz_decomposition ( g1 )
    assert gates_close ( gate , circ . asgate ( ) )
    # Sanity check
    return circ
|
def _x_format ( self ) :
"""Return the value formatter for this graph"""
|
def datetime_to_str ( x ) :
dt = datetime . utcfromtimestamp ( x )
return self . x_value_formatter ( dt )
return datetime_to_str
|
async def handler ( self , request : Request ) -> Tuple [ int , str , List [ Tuple [ str , str ] ] , bytes ] :
    """The handler handling each request.

    Builds the middleware call chain for this request and runs it; HTTP
    exceptions raised by the chain are written into the response instead of
    propagating.

    :param request: the Request instance
    :return: tuple of (status code, status text, header list, body bytes)
    """
    response : 'Response' = Response ( )
    handler : Callable = empty
    # Wrap in reverse registration order so the first-registered middleware
    # ends up outermost in the call chain.
    chain_reverse = self . middleware [ : : - 1 ]
    for middleware in chain_reverse :
        handler = map_context_to_middleware ( middleware , self , request , response , handler )
    try :
        await handler ( )
    except HttpException as e :
        # Translate HTTP errors into the response.
        response . code = e . code
        response . content = e . body
    return response . code , response . status , response . header_as_list , response . output
|
def _evaluate ( self , * args , ** kwargs ) :
    """Evaluate the actions (jr, lz, jz).

    INPUT is either:

    a) R, vR, vT, z, vz[, phi]:
        1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
        2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
    b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument

    ``cumul=True`` returns the cumulative average actions (to look at
    convergence).

    OUTPUT: (jr, lz, jz)

    HISTORY: 2013-09-10 - Written - Bovy (IAS)
    """
    R , vR , vT , z , vz , phi = self . _parse_args ( False , False , * args )
    if self . _c : # pragma : no cover
        pass
    else : # Use self._aAI to calculate the actions and angles in the isochrone potential
        acfs = self . _aAI . _actionsFreqsAngles ( R . flatten ( ) , vR . flatten ( ) , vT . flatten ( ) , z . flatten ( ) , vz . flatten ( ) , phi . flatten ( ) )
        jrI = nu . reshape ( acfs [ 0 ] , R . shape ) [ : , : - 1 ]
        jzI = nu . reshape ( acfs [ 2 ] , R . shape ) [ : , : - 1 ]
        anglerI = nu . reshape ( acfs [ 6 ] , R . shape )
        anglezI = nu . reshape ( acfs [ 8 ] , R . shape )
        # Warn when the sampled angles do not cover the full [0, 2pi) range,
        # since the angle-averages below would then be unreliable.
        if nu . any ( ( nu . fabs ( nu . amax ( anglerI , axis = 1 ) - _TWOPI ) > _ANGLETOL ) * ( nu . fabs ( nu . amin ( anglerI , axis = 1 ) ) > _ANGLETOL ) ) : # pragma : no cover
            warnings . warn ( "Full radial angle range not covered for at least one object; actions are likely not reliable" , galpyWarning )
        if nu . any ( ( nu . fabs ( nu . amax ( anglezI , axis = 1 ) - _TWOPI ) > _ANGLETOL ) * ( nu . fabs ( nu . amin ( anglezI , axis = 1 ) ) > _ANGLETOL ) ) : # pragma : no cover
            warnings . warn ( "Full vertical angle range not covered for at least one object; actions are likely not reliable" , galpyWarning )
        # Angle increments along the orbit, wrapped into [0, 2pi).
        danglerI = ( ( nu . roll ( anglerI , - 1 , axis = 1 ) - anglerI ) % _TWOPI ) [ : , : - 1 ]
        danglezI = ( ( nu . roll ( anglezI , - 1 , axis = 1 ) - anglezI ) % _TWOPI ) [ : , : - 1 ]
        if kwargs . get ( 'cumul' , False ) :
            sumFunc = nu . cumsum
        else :
            sumFunc = nu . sum
        # Actions as angle-weighted averages of the isochrone actions.
        jr = sumFunc ( jrI * danglerI , axis = 1 ) / sumFunc ( danglerI , axis = 1 )
        jz = sumFunc ( jzI * danglezI , axis = 1 ) / sumFunc ( danglezI , axis = 1 )
        if _isNonAxi ( self . _pot ) :
            # Non-axisymmetric potential: Lz is not conserved, average it too.
            lzI = nu . reshape ( acfs [ 1 ] , R . shape ) [ : , : - 1 ]
            anglephiI = nu . reshape ( acfs [ 7 ] , R . shape )
            danglephiI = ( ( nu . roll ( anglephiI , - 1 , axis = 1 ) - anglephiI ) % _TWOPI ) [ : , : - 1 ]
            if nu . any ( ( nu . fabs ( nu . amax ( anglephiI , axis = 1 ) - _TWOPI ) > _ANGLETOL ) * ( nu . fabs ( nu . amin ( anglephiI , axis = 1 ) ) > _ANGLETOL ) ) : # pragma : no cover
                warnings . warn ( "Full azimuthal angle range not covered for at least one object; actions are likely not reliable" , galpyWarning )
            lz = sumFunc ( lzI * danglephiI , axis = 1 ) / sumFunc ( danglephiI , axis = 1 )
        else :
            # Axisymmetric potential: Lz = R * vT is conserved; take the
            # initial value.
            lz = R [ : , 0 ] * vT [ : , 0 ]
        return ( jr , lz , jz )
|
def setup(self):
    """Configures the actor before execution.

    :return: None if successful, otherwise error message
    :rtype: str
    """
    result = super(ActorHandler, self).setup()
    if result is not None:
        return result

    self.update_parent()
    try:
        self.check_actors(self.actors)
    except Exception as e:
        return str(e)

    # Ensure every sub-actor carries a unique name.
    for sub_actor in self.actors:
        unique = sub_actor.unique_name(sub_actor.name)
        if unique != sub_actor.name:
            sub_actor.name = unique

    # Set up each non-skipped sub-actor, stopping at the first failure.
    for sub_actor in self.actors:
        if sub_actor.skip:
            continue
        result = sub_actor.setup()
        if result is not None:
            return result

    return self._director.setup()
|
def bestfit_func(self, bestfit_x):
    """Evaluate the fitted function at ``bestfit_x`` and return the y value.

    :param bestfit_x: x value (or array of x values) to evaluate at.
    :return: the fitted function applied to ``bestfit_x``.
    :raises KeyError: if ``do_bestfit`` has not been run yet.
    """
    # Bug fix: the original guard tested ``self.bestfit_func`` -- i.e. this
    # very method, which is always truthy -- so the KeyError could never
    # fire. Guard on the fit result that ``do_bestfit`` produces instead.
    if getattr(self, "fit_args", None) is None:
        raise KeyError("Do do_bestfit first")
    return self.args["func"](self.fit_args, bestfit_x)
|
def next(self):
    """Returns the next row from the Instances object.

    :return: the next Instance object
    :rtype: Instance
    :raises StopIteration: once all rows have been consumed
    """
    if self.row >= self.data.num_instances:
        raise StopIteration()
    current = self.row
    self.row += 1
    return self.data.get_instance(current)
|
def join(*paths):
    # type: (*Text) -> Text
    """Join any number of paths together.

    Arguments:
        *paths (str): Paths to join, given as positional arguments.

    Returns:
        str: The joined path.

    Example:
        >>> join('foo', 'bar', 'baz')
        'foo/bar/baz'
        >>> join('foo/bar', '../baz')
        'foo/baz'
        >>> join('foo/bar', '/baz')
        '/baz'
    """
    absolute = False
    components = []  # type: List[Text]
    for segment in paths:
        if not segment:
            # Skip empty components entirely.
            continue
        if segment[0] == "/":
            # An absolute segment discards everything before it.
            components = []
            absolute = True
        components.append(segment)
    joined = normpath("/".join(components))
    return abspath(joined) if absolute else joined
|
def text(value, encoding="utf-8", errors="strict"):
    """Convert a value to str on Python 3 and unicode on Python 2."""
    if isinstance(value, text_type):
        # Already the native text type; return unchanged.
        return value
    if isinstance(value, bytes):
        # Decode byte strings with the supplied encoding.
        return text_type(value, encoding, errors)
    # Fall back to the type's normal text conversion.
    return text_type(value)
|
def _add_event_in_element ( self , element , event ) :
"""Add a type of event in element .
: param element : The element .
: type element : hatemile . util . html . htmldomelement . HTMLDOMElement
: param event : The type of event .
: type event : str"""
|
if not self . main_script_added :
self . _generate_main_scripts ( )
if self . script_list is not None :
self . id_generator . generate_id ( element )
self . script_list . append_text ( event + "Elements.push('" + element . get_attribute ( 'id' ) + "');" )
|
def run ( ) :
    """Runs the linter and tests.

    :return:
        A bool - if the linter and tests ran successfully
    """
    print ( 'Python ' + sys . version . replace ( '\n' , '' ) )
    try :
        # Load the oscrypto test package from the build tree so we can report
        # which crypto backend the tests will exercise.
        oscrypto_tests_module_info = imp . find_module ( 'tests' , [ os . path . join ( build_root , 'oscrypto' ) ] )
        oscrypto_tests = imp . load_module ( 'oscrypto.tests' , * oscrypto_tests_module_info )
        oscrypto = oscrypto_tests . local_oscrypto ( )
        print ( '\noscrypto backend: %s' % oscrypto . backend ( ) )
    except ( ImportError ) :
        # oscrypto is optional here; skip the backend report if unavailable.
        pass
    if run_lint :
        print ( '' )
        lint_result = run_lint ( )
    else :
        # Linting disabled (run_lint is falsy): treat as success.
        lint_result = True
    if run_coverage :
        print ( '\nRunning tests (via coverage.py)' )
        sys . stdout . flush ( )
        tests_result = run_coverage ( ci = True )
    else :
        print ( '\nRunning tests' )
        sys . stdout . flush ( )
        tests_result = run_tests ( )
    sys . stdout . flush ( )
    return lint_result and tests_result
|
def evaluate ( args ) :
    """% prog evaluate prediction . bed reality . bed fastafile
    Make a truth table like :
    True False - - - Reality
    True TP FP
    False FN TN
    | - - - - Prediction
    Sn = TP / ( all true in reality ) = TP / ( TP + FN )
    Sp = TP / ( all true in prediction ) = TP / ( TP + FP )
    Ac = ( TP + TN ) / ( TP + FP + FN + TN )"""
    # NOTE: the docstring above doubles as the CLI usage text (it is passed
    # to OptionParser below), so its wording must be preserved.
    from jcvi . formats . sizes import Sizes
    p = OptionParser ( evaluate . __doc__ )
    p . add_option ( "--query" , help = "Chromosome location [default: %default]" )
    opts , args = p . parse_args ( args )
    if len ( args ) != 3 :
        sys . exit ( not p . print_help ( ) )
    prediction , reality , fastafile = args
    query = opts . query
    # Merge overlapping intervals so the set operations below are well
    # defined.
    prediction = mergeBed ( prediction )
    reality = mergeBed ( reality )
    sizes = Sizes ( fastafile )
    sizesfile = sizes . filename
    # Complements supply the "false" side of each classification.
    prediction_complement = complementBed ( prediction , sizesfile )
    reality_complement = complementBed ( reality , sizesfile )
    # Pairwise intersections yield the four cells of the confusion matrix.
    TPbed = intersectBed ( prediction , reality )
    FPbed = intersectBed ( prediction , reality_complement )
    FNbed = intersectBed ( prediction_complement , reality )
    TNbed = intersectBed ( prediction_complement , reality_complement )
    beds = ( TPbed , FPbed , FNbed , TNbed )
    if query :
        # Restrict each cell to the requested chromosome region.
        subbeds = [ ]
        rr = query_to_range ( query , sizes )
        ce = 'echo "{0}"' . format ( "\t" . join ( str ( x ) for x in rr ) )
        for b in beds :
            subbed = "." . join ( ( b , query ) )
            cmd = ce + " | intersectBed -a stdin -b {0}" . format ( b )
            sh ( cmd , outfile = subbed )
            subbeds . append ( subbed )
        beds = subbeds
    be = BedEvaluate ( * beds )
    print ( be , file = sys . stderr )
    if query :
        # Clean up the per-query temporary bed files.
        for b in subbeds :
            os . remove ( b )
    return be
|
def find_next_candidate(self):
    """Returns the next candidate Node for (potential) evaluation.

    The candidate list (really a stack) initially consists of all of the
    top-level (command line) targets provided when the Taskmaster was
    initialized. While we walk the DAG, visiting Nodes, all the children
    that haven't finished processing get pushed on to the candidate list.
    Each child can then be popped and examined in turn for whether *their*
    children are all up-to-date, in which case a Task will be created for
    their actual evaluation and potential building.

    Here is where we also allow candidate Nodes to alter the list of Nodes
    that should be examined. This is used, for example, when invoking SCons
    in a source directory. A source directory Node can return its
    corresponding build directory Node, essentially saying, "Hey, you
    really need to build this thing over here instead."
    """
    # Prefer pending children pushed while walking the DAG.
    try:
        return self.candidates.pop()
    except IndexError:
        pass

    try:
        candidate = self.top_targets_left.pop()
    except IndexError:
        # Both the candidate stack and the top-level targets are exhausted.
        return None

    self.current_top = candidate
    alternates, message = candidate.alter_targets()
    if not alternates:
        return candidate

    # The node redirected us elsewhere (e.g. source dir -> build dir):
    # requeue the original node behind its alternates and take the top.
    self.message = message
    self.candidates.append(candidate)
    self.candidates.extend(self.order(alternates))
    return self.candidates.pop()
|
def confirms ( self , txid ) :
    """Returns number of confirms or None if unpublished.

    :param txid: transaction id to look up.
    """
    # Normalize/validate the txid before querying the backend service.
    txid = deserialize . txid ( txid )
    return self . service . confirms ( txid )
|
def append_fresh_table(self, fresh_table):
    """Gets called by FreshTable instances when they get written to."""
    if not fresh_table.name:
        # It's an anonymous table: it goes to the front of the document.
        self.prepend_elements(
            [fresh_table, element_factory.create_newline_element()])
        return

    # Named tables get a header element matching their kind.
    if fresh_table.is_array:
        header = element_factory.create_array_of_tables_header_element(
            fresh_table.name)
    else:
        header = element_factory.create_table_header_element(fresh_table.name)
    self.append_elements(
        [header, fresh_table, element_factory.create_newline_element()])
|
async def status(self, switch=None):
    """Get current relay status.

    If another exchange is already in flight, wait on a future that will be
    resolved with the next status update; otherwise query the device
    directly with a status packet.

    :param switch: optional relay index; when given, return only that
        relay's state, otherwise return the full state mapping.
    """
    # The original implementation duplicated this wait/send logic verbatim
    # in both the switch and no-switch branches; only the final projection
    # differs, so it is factored out here.
    if self.waiters or self.in_transaction:
        fut = self.loop.create_future()
        self.status_waiters.append(fut)
        states = await fut
    else:
        packet = self.protocol.format_packet(b"\x1e")
        states = await self._send(packet)
    return states if switch is None else states[switch]
|
def _create_and_send_json_bulk ( self , payload , req_url , request_type = "POST" ) :
"""Create a json , do a request to the URL and process the response .
: param list payload : contains the informations necessary for the action .
It ' s a list of dictionnary .
: param str req _ url : URL to request with the payload .
: param str request _ type : type of request , either " POST " or " DELETE " .
: default request _ type : " POST " .
: return : response of the request .
: rtype : list of dict .
: raises CraftAiBadRequestError : if the payload doesn ' t have the
correct form to be transformed into a json or request _ type is
neither " POST " or " DELETE " ."""
|
# Extra header in addition to the main session ' s
ct_header = { "Content-Type" : "application/json; charset=utf-8" }
try :
json_pl = json . dumps ( payload )
except TypeError as err :
raise CraftAiBadRequestError ( "Error while dumping the payload into json" "format when converting it for the bulk request. {}" . format ( err . __str__ ( ) ) )
if request_type == "POST" :
resp = self . _requests_session . post ( req_url , headers = ct_header , data = json_pl )
elif request_type == "DELETE" :
resp = self . _requests_session . delete ( req_url , headers = ct_header , data = json_pl )
else :
raise CraftAiBadRequestError ( "Request for the bulk API should be either a POST or DELETE" "request" )
agents = self . _decode_response ( resp )
agents = self . _decode_response_bulk ( agents )
return agents
|
def execute ( self , env , args ) :
    """Removes a task.

    `env`
        Runtime ``Environment`` instance.
    `args`
        Arguments object from arg parser.
    """
    # extract args
    task_name = args . task_name
    force = args . force
    # Refuse to delete the currently-active task.
    if env . task . active and env . task . name == task_name :
        raise errors . ActiveTask
    if not env . task . exists ( task_name ) :
        raise errors . TaskNotFound ( task_name )
    if force :
        env . task . remove ( task_name )
        return
    # Interactive path: keep prompting until a definite y/n answer.
    try :
        while True :
            question = 'Are you sure you want to delete "{0}" (y/n)? ' . format ( task_name )
            answer = env . io . prompt ( question , newline = False ) . lower ( )
            if answer not in ( 'y' , 'n' ) :
                continue
            if answer == 'y' :
                env . task . remove ( task_name )
            break
    except KeyboardInterrupt :
        # Ctrl-C aborts the prompt; treat as "no".
        pass
|
def _control_vm ( self , command , expected = None ) :
    """Execute a command with the QEMU monitor when this VM is running.

    Old-style (``yield from``) asyncio coroutine.

    :param command: QEMU monitor command (e.g. ``info status``, ``stop`` etc.)
    :param expected: an array of expected byte strings to look for in the
        monitor output; if ``None``/empty the command is fire-and-forget
    :returns: result of the command (first matching line decoded to ``str``,
        or ``None`` on connection/write failure or no match)
    """

    result = None
    # Only talk to the monitor when the VM is up and a monitor port exists.
    if self . is_running ( ) and self . _monitor :
        log . debug ( "Execute QEMU monitor command: {}" . format ( command ) )
        try :
            log . info ( "Connecting to Qemu monitor on {}:{}" . format ( self . _monitor_host , self . _monitor ) )
            reader , writer = yield from asyncio . open_connection ( self . _monitor_host , self . _monitor )
        except OSError as e :
            # Best-effort: a dead monitor is reported, not raised.
            log . warn ( "Could not connect to QEMU monitor: {}" . format ( e ) )
            return result
        try :
            writer . write ( command . encode ( 'ascii' ) + b"\n" )
        except OSError as e :
            log . warn ( "Could not write to QEMU monitor: {}" . format ( e ) )
            writer . close ( )
            return result
        if expected :
            try :
                # Read lines until one contains any of the expected strings.
                while result is None :
                    line = yield from reader . readline ( )
                    if not line :
                        # EOF: monitor closed the connection.
                        break
                    for expect in expected :
                        if expect in line :
                            result = line . decode ( "utf-8" ) . strip ( )
                            break
            except EOFError as e :
                log . warn ( "Could not read from QEMU monitor: {}" . format ( e ) )
        writer . close ( )
        return result
|
def video_category ( self ) :
    """Fetch the Youku video category schema.

    doc: http://open.youku.com/docs/doc?id=90
    """
    response = requests . get ( 'https://openapi.youku.com/v2/schemas/video/category.json' )
    check_error ( response )
    return response . json ( )
|
def partitioned_repertoire ( self , direction , partition ) :
    """Compute the repertoire over the partition in the given direction.

    Delegates to the directed subsystem stored under ``direction``.
    """
    return self . system [ direction ] . partitioned_repertoire ( direction , partition )
|
def launch ( self , f ) :
    """Decorator that maps a view function as the endpoint for an Alexa
    LaunchRequest and starts the skill.

    @ask.launch
    def launched():
        return question('Welcome to Foo')

    The wrapped function is registered as the launch view function and
    renders the response for requests to the Launch URL. A request to the
    launch URL is verified with the Alexa server before the payload is
    passed to the view function.

    Arguments:
        f {function} -- Launch view function
    """

    self . _launch_view_func = f
    # NOTE(review): `wrapper` is defined but never returned or registered —
    # the undecorated `f` is returned below, so this closure appears to be
    # dead code. Confirm against upstream intent before removing.
    @ wraps ( f )
    def wrapper ( * args , ** kw ) :
        self . _flask_view_func ( * args , ** kw )
    return f
|
def _start_keep_alive ( self ) :
    '''Start the keep alive thread as a daemon.

    Daemon status means the thread will not block interpreter shutdown.
    '''
    worker = threading . Thread ( target = self . keep_alive , daemon = True )
    worker . start ( )
|
def get_index_names ( self , db_name , tbl_name , max_indexes ) :
    """Fetch index names via a Thrift-style two-phase RPC.

    Parameters:
     - db_name
     - tbl_name
     - max_indexes
    """
    # Send the request, then block waiting for the reply.
    self . send_get_index_names ( db_name , tbl_name , max_indexes )
    return self . recv_get_index_names ( )
|
def gain_to_loss_ratio ( self ) :
    """Gain-to-loss ratio, ratio of positive to negative returns.

    Formula:
        (n pos. / n neg.) * (avg. up-month return / avg. down-month return)
    [Source: CFA Institute]

    Returns
    -------
    float
    """
    positive = self > 0
    negative = self < 0
    count_ratio = nansum ( positive ) / nansum ( negative )
    mean_ratio = self [ positive ] . mean ( ) / self [ negative ] . mean ( )
    return count_ratio * mean_ratio
|
def run ( self , batch = True , interruptible = None , inplace = True ) :
    """Run task.

    :param batch: if False batching will be disabled.
    :param interruptible: If true interruptible instance will be used.
    :param inplace: Apply action on the current object or return a new one.
        NOTE(review): accepted but unused in this implementation — confirm
        whether in-place semantics were intended.
    :return: Task object.
    """
    query_params = { }
    if not batch :
        query_params [ 'batch' ] = False
    if interruptible is not None :
        query_params [ 'use_interruptible_instances' ] = interruptible
    logger . info (
        'Running task' ,
        extra = { 'resource' : self . __class__ . __name__ , 'query' : { 'id' : self . id , 'batch' : batch } } ,
    )
    response = self . _api . post ( url = self . _URL [ 'run' ] . format ( id = self . id ) , params = query_params )
    return Task ( api = self . _api , ** response . json ( ) )
|
def support_support_param_username ( self , ** kwargs ) :
    """Auto Generated Code.

    Builds config > support (brocade-ras namespace) > support-param >
    username and passes the tree to the callback.
    """
    config = ET . Element ( "config" )
    support_elem = ET . SubElement ( config , "support" , xmlns = "urn:brocade.com:mgmt:brocade-ras" )
    param_elem = ET . SubElement ( support_elem , "support-param" )
    user_elem = ET . SubElement ( param_elem , "username" )
    user_elem . text = kwargs . pop ( 'username' )
    callback = kwargs . pop ( 'callback' , self . _callback )
    return callback ( config )
|
def _parseRelocations ( self , sections ) :
    """Parse the relocations and attach them to their section.

    Only REL/RELA sections with a defined sh_link (their symbol table) are
    processed.
    """
    for sec in sections :
        if sec . header . sh_link == SHN . UNDEF :
            continue
        if sec . header . sh_type not in ( SHT . REL , SHT . RELA ) :
            continue
        # sh_link indexes the section holding this section's symbol table.
        symtab = sections [ sec . header . sh_link ] . symbols
        sec . relocations = self . __parseRelocationEntries ( sec , symtab )
|
def options ( self ) :
    """Dictionary of options which affect the curve fitting algorithm.

    Must contain the key `fit_function`, set to the function that will
    perform the fit; all other options are passed to it as keyword
    arguments. The defaults use `scipy.optimize.curve_fit` with
    `maxfev=1000`.

    If `fit_function` has the special value `lmfit`, then [lmfit][1] is
    used for the fit and all other options are passed as keyword arguments
    to [`lmfit.minimize`][2]; additional control is obtained by overriding
    `scipy_data_fitting.Fit.lmfit_fcn2min`.

    Any other function may be used as `fit_function` provided it:

    * Accepts, in order (even if unused): the function to fit
      (`scipy_data_fitting.Fit.function`), the independent values, the
      dependent values (`scipy_data_fitting.Data.array`), and the list of
      initial fitting parameter guesses (scaled by their prefix) in the
      order given by `scipy_data_fitting.Fit.fitting_parameters`.
    * Accepts any keyword arguments set in `scipy_data_fitting.Fit.options`
      (e.g. this is how one could pass error values to the fitting function).
    * Returns an object whose first element is a list or array of the fitted
      parameter values (and only those values) in the same order.

    [1]: http://lmfit.github.io/lmfit-py/
    [2]: http://lmfit.github.io/lmfit-py/fitting.html#the-minimize-function
    """
    # Lazily build and cache the defaults on first access.
    try :
        return self . _options
    except AttributeError :
        self . _options = {
            'fit_function' : scipy . optimize . curve_fit ,
            'maxfev' : 1000 ,
        }
        return self . _options
|
def uri ( self ) :
"Fedora URI for this object ( ` ` info : fedora / foo : # # # ` ` form of object pid )"
|
use_pid = self . pid
if callable ( use_pid ) :
use_pid = self . DUMMY_PID
return 'info:fedora/' + use_pid
|
def _set_flow_rate ( pipette , params ) -> None :
    """Set flow rate in uL/mm, to value obtained from command's params.

    Raises RuntimeError unless the param is strictly positive.
    """
    rate = params [ 'flowRate' ]
    # `not rate > 0` (rather than `rate <= 0`) also rejects NaN.
    if not rate > 0 :
        raise RuntimeError ( 'Positive flowRate param required' )
    pipette . flow_rate = { 'aspirate' : rate , 'dispense' : rate }
|
def distance_stats ( x , y , ** kwargs ) :
    """distance_stats(x, y, *, exponent=1)

    Computes the usual (biased) estimators for the distance covariance
    and distance correlation between two random vectors, and the
    individual distance variances.

    Parameters
    ----------
    x : array_like
        First random vector. The columns correspond with the individual
        random variables while the rows are individual instances of the
        random vector.
    y : array_like
        Second random vector, with the same layout as ``x``.
    exponent : float
        Exponent of the Euclidean distance, in the range :math:`(0, 2)`.
        Equivalently, it is twice the Hurst parameter of fractional
        Brownian motion.

    Returns
    -------
    Stats
        Distance covariance, distance correlation, distance variance of
        the first random vector and distance variance of the second.

    See Also
    --------
    distance_covariance
    distance_correlation

    Notes
    -----
    It is less efficient to compute the statistics separately, rather than
    using this function, because some computations can be shared. The fast
    algorithm proposed in :cite:`b-fast_distance_correlation` is used when
    possible.

    Examples
    --------
    >>> import numpy as np
    >>> import dcor
    >>> a = np.array([[1, 2, 3, 4],
    ...               [5, 6, 7, 8],
    ...               [9, 10, 11, 12],
    ...               [13, 14, 15, 16]])
    >>> b = np.array([[1], [0], [0], [1]])
    >>> dcor.distance_stats(a, b)  # doctest: +NORMALIZE_WHITESPACE
    Stats(covariance_xy=1.0, correlation_xy=0.5266403...,
    variance_x=7.2111025..., variance_y=0.5)
    """
    # The unsquared statistics are the square roots of the squared versions,
    # which can all be computed in one shared pass.
    squared = distance_stats_sqr ( x , y , ** kwargs )
    return Stats ( * map ( _sqrt , squared ) )
|
def wait_for_logs_matching ( self , matcher , timeout = 10 , encoding = 'utf-8' , ** logs_kwargs ) :
    """Wait for logs matching the given matcher.

    Delegates to the module-level ``wait_for_logs_matching`` helper, passing
    this object's inner container.
    """
    wait_for_logs_matching (
        self . inner ( ) ,
        matcher ,
        timeout = timeout ,
        encoding = encoding ,
        ** logs_kwargs ,
    )
|
def validate ( self , corpus ) :
    """Perform validation on the given corpus.

    Args:
        corpus (Corpus): The corpus to test/validate.
    """
    # Run every validator (in order) and collect results by validator name.
    results = { v . name ( ) : v . validate ( corpus ) for v in self . validators }
    passed = all ( r . passed for r in results . values ( ) )
    return CombinedValidationResult ( passed , results )
|
def _fix_paths ( self , data ) :
    """All paths need to be fixed - add PROJ_DIR prefix + normalize."""
    prefix = '$PROJ_DIR$'
    data [ 'include_paths' ] = [ join ( prefix , p ) for p in data [ 'include_paths' ] ]
    if data [ 'linker_file' ] :
        data [ 'linker_file' ] = join ( prefix , data [ 'linker_file' ] )
    # Merge every source-type dict plus include_files into per-group lists.
    groups = { }
    for attribute in list ( SOURCE_KEYS ) + [ 'include_files' ] :
        for group_name , files in data [ attribute ] . items ( ) :
            groups . setdefault ( group_name , [ ] ) . extend (
                join ( prefix , f ) for f in files )
    # sort groups alphabetically by name
    data [ 'groups' ] = OrderedDict ( sorted ( groups . items ( ) ) )
|
def dIbr_dV ( Yf , Yt , V ) :
    """Computes partial derivatives of branch currents w.r.t. voltage.

    Based on Ray Zimmerman, "dIbr_dV.m", MATPOWER, version 4.0b1,
    PSERC (Cornell), http://www.pserc.cornell.edu/matpower/

    :param Yf: branch admittance matrix, "from" ends (per the MATPOWER
        reference named above)
    :param Yt: branch admittance matrix, "to" ends
    :param V: complex bus voltage vector
    :returns: tuple (dIf_dVa, dIf_dVm, dIt_dVa, dIt_dVm, If, It)
    """

    # nb = len ( V )
    # Unit-magnitude voltage phasors: V / |V|.
    Vnorm = div ( V , abs ( V ) )
    diagV = spdiag ( V )
    diagVnorm = spdiag ( Vnorm )
    # Sensitivities w.r.t. voltage angle (Va) and magnitude (Vm).
    dIf_dVa = Yf * 1j * diagV
    dIf_dVm = Yf * diagVnorm
    dIt_dVa = Yt * 1j * diagV
    dIt_dVm = Yt * diagVnorm
    # Compute currents .
    If = Yf * V
    It = Yt * V
    return dIf_dVa , dIf_dVm , dIt_dVa , dIt_dVm , If , It
|
def addSwitch ( self , name = None ) :
    '''Add a new switch to the topology.

    When no name is given, auto-generate the next free one (s0, s1, ...).
    '''
    if name is None :
        name = 's' + str ( self . __snum )
        self . __snum += 1
        # Skip over names already present in the graph.
        while name in self . __nxgraph :
            name = 's' + str ( self . __snum )
            self . __snum += 1
    self . __addNode ( name , Switch )
    return name
|
def create_runscript ( self , default = "/bin/bash" , force = False ) :
    '''Create a singularity runscript based on a Docker entrypoint or command.

    The Docker ENTRYPOINT is preferred if defined; otherwise CMD is used.
    If neither is found, ``default`` is used.

    Parameters
    ==========
    default: set a default entrypoint, if the container does not have
             an entrypoint or cmd.
    force: If true, use default and ignore Dockerfile settings
    '''
    runscript = default
    # Only consult the Docker metadata when not enforcing the default.
    if force is False :
        if self . entrypoint is not None :
            runscript = '' . join ( self . entrypoint )
        elif self . cmd is not None :
            runscript = '' . join ( self . cmd )
    # Entrypoint should use exec
    if not runscript . startswith ( 'exec' ) :
        runscript = "exec %s" % runscript
    # Should take input arguments into account
    if re . search ( '"?[$]@"?' , runscript ) is None :
        runscript = '%s "$@"' % runscript
    return runscript
|
def get_data ( self ) :
    """Gets the asset content data.

    return: (osid.transport.DataInputStream) - the content data
    raise: IllegalState - no data has been set
    raise: OperationFailed - unable to complete request
    *compliance: mandatory -- This method must be implemented.*
    """
    if not self . _my_map [ 'data' ] :
        raise errors . IllegalState ( 'no data' )
    # Stream the payload out of GridFS in the 'repository' database.
    database = JSONClientValidated ( 'repository' , runtime = self . _runtime ) . raw ( )
    grid_fs = gridfs . GridFS ( database )
    return DataInputStream ( grid_fs . get ( self . _my_map [ 'data' ] ) )
|
def extract ( self , dest_fldr , password = '' ) :
    """Unzip the file contents to ``dest_fldr`` (created if it doesn't exist).

    :param dest_fldr: destination folder for the extracted files.
    :param password: archive password, if any (not used for TAR archives).
    :raises ValueError: if the archive type is not ZIP, GZ or TAR.
    """
    if self . type == 'ZIP' :
        self . _extract_zip ( dest_fldr , password )
    elif self . type == 'GZ' :
        self . _extract_gz ( dest_fldr , password )
    elif self . type == 'TAR' :
        # NOTE(review): unlike the other branches this passes the archive
        # filename rather than the password — confirm _extract_tar's signature.
        self . _extract_tar ( dest_fldr , self . fname )
    else :
        # Fix: the original `raise ('Unknown archive file type')` raised a
        # bare string, which is a TypeError in Python 3 (exceptions must
        # derive from BaseException). Raise a proper exception instead.
        raise ValueError ( 'Unknown archive file type' )
|
def parse_cartouche_text ( lines ) :
    '''Parse text in cartouche format and return a reStructuredText equivalent.

    Args:
        lines: A sequence of strings representing the lines of a single
            docstring as read from the source by Sphinx. This string should
            be in a format that can be parsed by cartouche.

    Returns:
        A list of lines containing the transformed docstring as
        reStructuredText as produced by cartouche.

    Raises:
        RuntimeError: If the docstring cannot be parsed.
    '''
    # Pipeline: normalize indentation, group into paragraphs, build the
    # parse/syntax trees, then render back out as reStructuredText.
    normalized = unindent ( lines )
    normalized = pad_blank_lines ( normalized )
    normalized = first_paragraph_indent ( normalized )
    paragraphs = gather_lines ( normalized )
    syntax_tree = extract_structure ( group_paragraphs ( paragraphs ) )
    rendered = syntax_tree . render_rst ( )
    ensure_terminal_blank ( rendered )
    return rendered
|
def _language_exclusions ( stem : LanguageStemRange , exclusions : List [ ShExDocParser . LanguageExclusionContext ] ) -> None :
    """languageExclusion = '-' LANGTAG STEM_MARK?

    Append each exclusion to ``stem.exclusions``: a ``LanguageStem`` when a
    stem mark is present, otherwise the bare ``LANGTAG``.
    """
    for exclusion in exclusions :
        # Strip the leading '-' from the token's lexical form.
        tag = LANGTAG ( exclusion . LANGTAG ( ) . getText ( ) [ 1 : ] )
        if exclusion . STEM_MARK ( ) :
            stem . exclusions . append ( LanguageStem ( tag ) )
        else :
            stem . exclusions . append ( tag )
|
def query_cat_random ( catid , ** kwargs ) :
    '''Get random lists of certain category.'''
    limit = kwargs . get ( 'limit' , 8 )
    if catid == '' :
        # No category given: sample from all posts.
        query = TabPost . select ( )
    else :
        query = (
            TabPost . select ( )
            . join ( TabPost2Tag , on = ( TabPost . uid == TabPost2Tag . post_id ) )
            . where ( ( TabPost . valid == 1 ) & ( TabPost2Tag . tag_id == catid ) )
        )
    return query . order_by ( peewee . fn . Random ( ) ) . limit ( limit )
|
def main ( argv = None ) :
    """Validate text parsed with FSM or validate an FSM via command line.

    :param argv: argument vector; defaults to ``sys.argv``.
        args[0] is the template file, args[1] an optional CLI-output file to
        parse, args[2] an optional reference table to compare against.
    :returns: 0 after printing help, 1 on a data mismatch; otherwise falls
        off the end (``None``).
    :raises Usage: on a bad option or an invalid argument count.
    """

    if argv is None :
        argv = sys . argv
    try :
        opts , args = getopt . getopt ( argv [ 1 : ] , 'h' , [ 'help' ] )
    except getopt . error as msg :
        raise Usage ( msg )
    for opt , _ in opts :
        if opt in ( '-h' , '--help' ) :
            print ( __doc__ )
            print ( help_msg )
            return 0
    if not args or len ( args ) > 4 :
        raise Usage ( 'Invalid arguments.' )
    # If we have an argument , parse content of file and display as a template .
    # Template displayed will match input template , minus any comment lines .
    with open ( args [ 0 ] , 'r' ) as template :
        fsm = TextFSM ( template )
        print ( 'FSM Template:\n%s\n' % fsm )
        if len ( args ) > 1 : # Second argument is file with example cli input .
            # Prints parsed tabular result .
            with open ( args [ 1 ] , 'r' ) as f :
                cli_input = f . read ( )
            table = fsm . ParseText ( cli_input )
            print ( 'FSM Table:' )
            result = str ( fsm . header ) + '\n'
            for line in table :
                result += str ( line ) + '\n'
            print ( result , end = '' )
            if len ( args ) > 2 : # Compare tabular result with data in third file argument .
                # Exit value indicates if processed data matched expected result .
                with open ( args [ 2 ] , 'r' ) as f :
                    ref_table = f . read ( )
                if ref_table != result :
                    print ( 'Data mis-match!' )
                    return 1
                else :
                    print ( 'Data match!' )
|
def getBottomRight ( self ) :
    """Retrieves a tuple with the x,y coordinates of the lower right point
    of the rect. Requires the coordinates, width, height to be numbers.
    """
    # NOTE(review): y is returned unchanged (height is never added), which
    # is correct only under a bottom-origin convention where "bottom" sits
    # at y — confirm the library's coordinate system.
    right_x = float ( self . get_x ( ) ) + float ( self . get_width ( ) )
    bottom_y = float ( self . get_y ( ) )
    return ( right_x , bottom_y )
|
def report_dead_hosting_devices ( self , context , hd_ids = None ) :
    """Report that a hosting device cannot be contacted (presumed dead).

    :param context: session context
    :param hd_ids: list of non-responding hosting devices
    :return: None
    """
    # Fire-and-forget notification (cast, not call): no reply is awaited.
    self . client . prepare ( ) . cast (
        context ,
        'report_non_responding_hosting_devices' ,
        host = self . host ,
        hosting_device_ids = hd_ids ,
    )
|
def get_display_types ( ) :
    """Get ordered dict containing available display types from available
    luma sub-projects.

    :rtype: collections.OrderedDict
    """
    # Preserve the library-discovery order in the resulting mapping.
    return OrderedDict (
        ( namespace , get_choices ( 'luma.{0}.device' . format ( namespace ) ) )
        for namespace in get_supported_libraries ( )
    )
|
def from_file ( filename = "feff.inp" ) :
"""Creates a Feff _ tag dictionary from a PARAMETER or feff . inp file .
Args :
filename : Filename for either PARAMETER or feff . inp file
Returns :
Feff _ tag object"""
|
with zopen ( filename , "rt" ) as f :
lines = list ( clean_lines ( f . readlines ( ) ) )
params = { }
eels_params = [ ]
ieels = - 1
ieels_max = - 1
for i , line in enumerate ( lines ) :
m = re . match ( r"([A-Z]+\d*\d*)\s*(.*)" , line )
if m :
key = m . group ( 1 ) . strip ( )
val = m . group ( 2 ) . strip ( )
val = Tags . proc_val ( key , val )
if key not in ( "ATOMS" , "POTENTIALS" , "END" , "TITLE" ) :
if key in [ "ELNES" , "EXELFS" ] :
ieels = i
ieels_max = ieels + 5
else :
params [ key ] = val
if ieels >= 0 :
if i >= ieels and i <= ieels_max :
if i == ieels + 1 :
if int ( line . split ( ) [ 1 ] ) == 1 :
ieels_max -= 1
eels_params . append ( line )
if eels_params :
if len ( eels_params ) == 6 :
eels_keys = [ 'BEAM_ENERGY' , 'BEAM_DIRECTION' , 'ANGLES' , 'MESH' , 'POSITION' ]
else :
eels_keys = [ 'BEAM_ENERGY' , 'ANGLES' , 'MESH' , 'POSITION' ]
eels_dict = { "ENERGY" : Tags . _stringify_val ( eels_params [ 0 ] . split ( ) [ 1 : ] ) }
for k , v in zip ( eels_keys , eels_params [ 1 : ] ) :
eels_dict [ k ] = str ( v )
params [ str ( eels_params [ 0 ] . split ( ) [ 0 ] ) ] = eels_dict
return Tags ( params )
|
# NOTE(review): the three lines below are dataset-page footer residue from
# extraction, not source code; commented out to keep the file parseable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.