signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def get_handler(progname, fmt=None, datefmt=None, project_id=None,
                credentials=None, debug_thread_worker=False, **_):
    """Create a Stackdriver logging handler.

    See `ulogger.stackdriver.CloudLoggingHandlerBuilder` for the supported
    arguments and keyword arguments.

    Returns:
        (obj): Instance of
            `google.cloud.logging.handlers.CloudLoggingHandler`
    """
    handler_builder = CloudLoggingHandlerBuilder(
        progname,
        fmt=fmt,
        datefmt=datefmt,
        project_id=project_id,
        credentials=credentials,
        debug_thread_worker=debug_thread_worker,
    )
    return handler_builder.get_handler()
def data_file(self):
    """Full path of the file used to save/load configured data.

    The path is `<cwd>/<lazy_folder><data_filename>`; `lazy_folder` is
    expected to carry its own trailing separator.
    """
    base = '/'.join((os.getcwd(), self.lazy_folder))
    return base + self.data_filename
def tacacs_server_host_protocol(self, **kwargs):
    """Auto Generated Code.

    Builds the NETCONF config element for a tacacs-server host's
    protocol setting and hands it to the callback.
    """
    config = ET.Element("config")
    tacacs_server = ET.SubElement(
        config, "tacacs-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
    host = ET.SubElement(tacacs_server, "host")
    # Keyword values are popped in the same order the original built them.
    for tag, kwarg in (("hostname", 'hostname'),
                       ("use-vrf", 'use_vrf'),
                       ("protocol", 'protocol')):
        ET.SubElement(host, tag).text = kwargs.pop(kwarg)
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def estimateExportTilesSize(self, exportBy, levels, tilePackage=False,
                            exportExtent="DEFAULTEXTENT",
                            areaOfInterest=None, async=True):
    """Estimate the size of a tile package / cache export (async map-service job).

    Submits an ``estimateExportTilesSize`` job to the service. The job result
    reports the total export size in bytes and the tile count, and can be used
    to check against the service's ``maxExportTileCount`` limit.

    Inputs:
        exportBy - Criteria used to select the tile service levels to export.
            Values: LevelID | Resolution | Scale
        levels - Levels to estimate, matching ``exportBy``; comma-separated
            values (``1,2,3``) or ranges (``1-4,7-9``).
        tilePackage - True to estimate a tile package, False for a cache
            raster data set. Default False.
        exportExtent - Bounding box ``<xmin>,<ymin>,<xmax>,<ymax>`` of the
            area to export; defaults to the full extent of the tiled service.
        areaOfInterest - (Optional) polygon (dict or ``Polygon``) limiting the
            export; supersedes ``exportExtent``.
        async - True (default) returns the raw job response immediately;
            False polls the job until completion and returns its results.
            NOTE(review): ``async`` became a reserved word in Python 3.7 --
            this code only compiles under Python 2 / early Python 3.

    Returns:
        Job response dict (async), job results, or job messages on failure.
    """
    url = self._url + "/estimateExportTilesSize"
    params = {"f": "json",
              "levels": levels,
              "exportBy": exportBy,
              "tilePackage": tilePackage,
              "exportExtent": exportExtent}
    # NOTE(review): redundant -- 'levels' was already set in the dict above.
    params["levels"] = levels
    if not areaOfInterest is None:
        if isinstance(areaOfInterest, Polygon):
            template = {"features": [areaOfInterest.asDictionary]}
            params['areaOfInterest'] = template
        else:
            params['areaOfInterest'] = areaOfInterest
    if async == True:
        # Fire the job and hand the raw job response back to the caller.
        return self._get(url=url,
                         param_dict=params,
                         securityHandler=self._securityHandler,
                         proxy_url=self._proxy_url,
                         proxy_port=self._proxy_port)
    else:
        # Submit the job, then poll its status every 5 seconds until it
        # succeeds, fails, or is cancelled.
        exportJob = self._get(url=url,
                              param_dict=params,
                              securityHandler=self._securityHandler,
                              proxy_url=self._proxy_url,
                              proxy_port=self._proxy_port)
        jobUrl = "%s/jobs/%s" % (url, exportJob['jobId'])
        gpJob = GPJob(url=jobUrl,
                      securityHandler=self._securityHandler,
                      proxy_port=self._proxy_port,
                      proxy_url=self._proxy_url)
        status = gpJob.jobStatus
        while status != "esriJobSucceeded":
            if status in ['esriJobFailed',
                          'esriJobCancelling',
                          'esriJobCancelled']:
                # Terminal failure states: surface the job messages.
                return gpJob.messages
            else:
                time.sleep(5)
                status = gpJob.jobStatus
        return gpJob.results
def get_user(self, uid, disabled=False):
    '''Fetch a user record by UID from the publisher API.

    Given a uid, returns a dict with:
        first_name (string), image1 (string), email (string),
        create_date (string), last_name (string), uid (string)

    Pass disabled=True to fetch the user even if it has been disabled.

    Returns None when no user is found or on an auth problem; raises
    ValueError when the API reports access denied.
    '''
    path = "/api/v3/publisher/user/get"
    data = {
        'api_token': self.api_token,
        'aid': self.app_id,
        'uid': uid,
        'disabled': disabled,
    }
    r = requests.get(self.base_url + path, data=data)
    # NOTE(review): 2 / 2004 are API-specific codes, not standard HTTP
    # statuses -- confirm against the service documentation.
    if r.status_code == 2:
        # An auth issue
        raise ValueError(path + ":" + r.reason)
    if r.status_code == 2004:
        # No user found
        return None
    res = json.loads(r.content)
    # The error could also be reported inside the JSON payload.
    # FIX: dict.has_key() was removed in Python 3 -- use the `in` operator.
    if 'code' in res:
        if res['code'] != 0:
            # 403 == auth error, 2004 == not found, 2 == access denied
            if res['code'] == 2:
                raise ValueError(path + ":" + res['message'])
            # FIX: function-call print works on both Python 2 and 3.
            print(res)
            return None
    return res['user']
def get_dummy_request(language=None):
    """Returns a Request instance populated with cms specific attributes.

    :param language: optional language code; falls back to
        ``settings.LANGUAGE_CODE``.
    :return: a ``RequestFactory``-built GET request with session,
        language, ``current_page`` and (if auth is installed) an
        ``AnonymousUser`` attached.
    """
    # NOTE(review): ALLOWED_HOSTS is normally a list, so comparing it to the
    # string "*" is always True when the setting is non-empty -- confirm
    # whether `!= ["*"]` was intended.
    if settings.ALLOWED_HOSTS and settings.ALLOWED_HOSTS != "*":
        host = settings.ALLOWED_HOSTS[0]
    else:
        host = Site.objects.get_current().domain
    request = RequestFactory().get("/", HTTP_HOST=host)
    request.session = {}
    request.LANGUAGE_CODE = language or settings.LANGUAGE_CODE
    # Needed for plugin rendering.
    request.current_page = None
    if 'django.contrib.auth' in settings.INSTALLED_APPS:
        # Imported lazily so the function works when auth is not installed.
        from django.contrib.auth.models import AnonymousUser
        request.user = AnonymousUser()
    return request
def channels_remove_owner(self, room_id, user_id, **kwargs):
    """Remove the owner role from a user in the given channel."""
    payload = dict(roomId=room_id, userId=user_id, kwargs=kwargs)
    return self.__call_api_post('channels.removeOwner', **payload)
def hmget(self, key, field, *fields, encoding=_NOTSET):
    """Get the values of all the given hash fields."""
    command_args = (key, field) + fields
    return self.execute(b'HMGET', *command_args, encoding=encoding)
def title(self, value):
    """Setter for **self.__title** attribute.

    :param value: Attribute value.
    :type value: unicode
    """
    if value is not None:
        # Python 2 'unicode' type check; note asserts are stripped under -O.
        assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format("title", value)
    # None is accepted and stored as-is (clears the title).
    self.__title = value
def trial_end(self, trial_job_id, success):
    """Record the end of a trial.

    Parameters
    ----------
    trial_job_id : int
        trial job id
    success : bool
        True if the experiment finished successfully, False otherwise
    """
    if trial_job_id not in self.running_history:
        logger.warning('trial_end: trial_job_id does not in running_history')
        return
    if success:
        # Build the running average of the recorded metrics.
        averages = []
        total = 0
        for count, value in enumerate(self.running_history[trial_job_id], start=1):
            total += value
            averages.append(total / count)
        self.completed_avg_history[trial_job_id] = averages
    # Forget the trial whether it succeeded or not.
    self.running_history.pop(trial_job_id)
def get_jid(jid):
    '''Return the information returned when the specified job id was executed.

    Reads every minion's return file under the job's cache directory and
    deserializes it into ``{minion_id: {'return': ..., ['out': ...]}}``.
    '''
    jid_dir = salt.utils.jid.jid_dir(jid, _job_dir(), __opts__['hash_type'])
    serial = salt.payload.Serial(__opts__)
    ret = {}
    # Check to see if the jid is real, if not return the empty dict
    if not os.path.isdir(jid_dir):
        return ret
    # Each subdirectory of the jid dir is named after a minion.
    for fn_ in os.listdir(jid_dir):
        if fn_.startswith('.'):
            continue
        if fn_ not in ret:
            retp = os.path.join(jid_dir, fn_, RETURN_P)
            outp = os.path.join(jid_dir, fn_, OUT_P)
            if not os.path.isfile(retp):
                continue
            # Retry loop: re-attempts the read if a transient error occurs
            # before ret[fn_] is populated (e.g. the file is mid-write).
            while fn_ not in ret:
                try:
                    with salt.utils.files.fopen(retp, 'rb') as rfh:
                        ret_data = serial.load(rfh)
                        if not isinstance(ret_data, dict) or 'return' not in ret_data:
                            # Convert the old format in which return.p contains
                            # the only return data to the new dict containing
                            # 'return' and optionally 'retcode' and 'success'.
                            ret_data = {'return': ret_data}
                        ret[fn_] = ret_data
                        if os.path.isfile(outp):
                            with salt.utils.files.fopen(outp, 'rb') as rfh:
                                ret[fn_]['out'] = serial.load(rfh)
                except Exception as exc:
                    # Permission problems are fatal; anything else retries.
                    if 'Permission denied:' in six.text_type(exc):
                        raise
    return ret
def SysRem ( time , flux , err , ncbv = 5 , niter = 50 , sv_win = 999 , sv_order = 3 , ** kwargs ) :
'''Applies : py : obj : ` SysRem ` to a given set of light curves .
: param array _ like time : The time array for all of the light curves
: param array _ like flux : A 2D array of the fluxes for each of the light curves , shape ` ( nfluxes , ntime ) `
: param array _ like err : A 2D array of the flux errors for each of the light curves , shape ` ( nfluxes , ntime ) `
: param int ncbv : The number of signals to recover . Default 5
: param int niter : The number of : py : obj : ` SysRem ` iterations to perform . Default 50
: param int sv _ win : The Savitsky - Golay filter window size . Default 999
: param int sv _ order : The Savitsky - Golay filter order . Default 3''' | nflx , tlen = flux . shape
# Get normalized fluxes
med = np . nanmedian ( flux , axis = 1 ) . reshape ( - 1 , 1 )
y = flux - med
# Compute the inverse of the variances
invvar = 1. / err ** 2
# The CBVs for this set of fluxes
cbvs = np . zeros ( ( ncbv , tlen ) )
# Recover ` ncbv ` components
for n in range ( ncbv ) : # Initialize the weights and regressors
c = np . zeros ( nflx )
a = np . ones ( tlen )
f = y * invvar
# Perform ` niter ` iterations
for i in range ( niter ) : # Compute the ` c ` vector ( the weights )
c = np . dot ( f , a ) / np . dot ( invvar , a ** 2 )
# Compute the ` a ` vector ( the regressors )
a = np . dot ( c , f ) / np . dot ( c ** 2 , invvar )
# Remove this component from all light curves
y -= np . outer ( c , a )
# Save this regressor after smoothing it a bit
if sv_win >= len ( a ) :
sv_win = len ( a ) - 1
if sv_win % 2 == 0 :
sv_win -= 1
cbvs [ n ] = savgol_filter ( a - np . nanmedian ( a ) , sv_win , sv_order )
return cbvs |
def _get_or_convert_magnitude(self, mag_letter):
    """Takes input of the magnitude letter and outputs the magnitude fetched
    from the catalogue, or a value converted from the other catalogue bands
    when the requested one is missing and estimation is enabled.

    :param mag_letter: single band letter in "UBVJIHKLMN"
    :return: the catalogue magnitude, an estimated magnitude, or ``np.nan``
        when conversion is impossible
    :raises ValueError: if ``mag_letter`` is not a single allowed letter
    """
    allowed_mags = "UBVJIHKLMN"
    catalogue_mags = 'BVIJHK'
    if mag_letter not in allowed_mags or not len(mag_letter) == 1:
        raise ValueError("Magnitude letter must be a single letter in {0}".format(allowed_mags))
    mag_str = 'mag' + mag_letter
    mag_val = self.getParam(mag_str)
    if isNanOrNone(mag_val) and ed_params.estimateMissingValues:
        # then we need to estimate it!
        # old style dict comprehension for python 2.6
        mag_dict = dict(('mag' + letter, self.getParam('mag' + letter)) for letter in catalogue_mags)
        mag_class = Magnitude(self.spectralType, **mag_dict)
        try:
            mag_conversion = mag_class.convert(mag_letter)
            # Record that this value is an estimate, not a catalogue value.
            self.flags.addFlag('Estimated mag{0}'.format(mag_letter))
            return mag_conversion
        except ValueError as e:  # cant convert
            logger.exception(e)
            return np.nan
    else:
        # Catalogue value is present (or estimation disabled) -- use it.
        return mag_val
def calculate_slope(x_coord1, y_coord1, x_coord2, y_coord2):
    """Calculate the slope of the line defined by two points.

    Examples:
        >>> calculate_slope(4, 2, 2, 5)
        -1.5
        >>> calculate_slope(2, 4, 4, 6)
        1.0
        >>> calculate_slope(1, 2, 4, 2)
        0.0

    Args:
        x_coord1, y_coord1: The x and y coordinates of the first point
        x_coord2, y_coord2: The x and y coordinates of the second point

    Returns:
        The slope of the line defined by the two points, as a float.

    Raises:
        ZeroDivisionError: if the line is vertical (x_coord1 == x_coord2).
    """
    # float() guarantees true division under Python 2 as well.
    return float(y_coord2 - y_coord1) / (x_coord2 - x_coord1)
def parse_coverage_args(argv):
    """Parse command line arguments, returning a dict of valid options:

        'coverage_xml': COVERAGE_XML,
        'html_report': None | HTML_REPORT,
        'external_css_file': None | CSS_FILE,

    where `COVERAGE_XML`, `HTML_REPORT`, and `CSS_FILE` are paths.
    The path strings may or may not exist.
    """
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    # Positional: one or more coverage XML files.
    parser.add_argument('coverage_xml', type=str, nargs='+', help=COVERAGE_XML_HELP)
    # Optional output-path arguments.
    for flag, help_text in (('--html-report', HTML_REPORT_HELP),
                            ('--external-css-file', CSS_FILE_HELP)):
        parser.add_argument(flag, metavar='FILENAME', type=str,
                            default=None, help=help_text)
    parser.add_argument('--compare-branch', metavar='BRANCH', type=str,
                        default='origin/master', help=COMPARE_BRANCH_HELP)
    parser.add_argument('--fail-under', metavar='SCORE', type=float,
                        default='0', help=FAIL_UNDER_HELP)
    # Boolean switches.
    for flag, help_text in (('--ignore-staged', IGNORE_STAGED_HELP),
                            ('--ignore-unstaged', IGNORE_UNSTAGED_HELP)):
        parser.add_argument(flag, action='store_true', default=False, help=help_text)
    parser.add_argument('--exclude', metavar='EXCLUDE', type=str,
                        nargs='+', help=EXCLUDE_HELP)
    parser.add_argument('--src-roots', metavar='DIRECTORY', type=str, nargs='+',
                        default=['src/main/java', 'src/test/java'],
                        help=SRC_ROOTS_HELP)
    return vars(parser.parse_args(argv))
def traceback(frame, parent=False):
    """Pick frame info from current caller's `frame`.

    Args:
        frame: :type:`frame` instance, use :func:`inspect.currentframe`.
        parent: whether to get outer frame (caller) traceback info,
            :data:`False` by default.

    Returns:
        :class:`inspect.Traceback` instance for `frame` or its parent frame
        (``None`` when ``parent`` is requested but there is no outer frame).
    """
    if parent is True:
        # The frame itself is always at index 0 of its own outer frames.
        outers = inspect.getouterframes(frame)
        if len(outers) == 1:
            return None
        return inspect.getframeinfo(outers[1][0])
    return inspect.getframeinfo(frame)
def parse_parts(self, file, boundary, content_length):
    """Generate ``('file', (name, val))`` and
    ``('form', (name, val))`` parts.

    Consumes the event stream produced by ``self.parse_lines`` and
    assembles complete multipart parts: file parts are streamed to a
    container returned by ``start_file_streaming``; form parts are
    buffered in memory (guarded by ``max_form_memory_size``).
    """
    in_memory = 0
    for ellt, ell in self.parse_lines(file, boundary, content_length):
        if ellt == _begin_file:
            headers, name, filename = ell
            is_file = True
            guard_memory = False
            filename, container = self.start_file_streaming(filename, headers, content_length)
            _write = container.write
        elif ellt == _begin_form:
            headers, name = ell
            is_file = False
            container = []
            _write = container.append
            guard_memory = self.max_form_memory_size is not None
        elif ellt == _cont:
            _write(ell)
            # if we write into memory and there is a memory size limit we
            # count the number of bytes in memory and raise an exception if
            # there is too much data in memory.
            if guard_memory:
                in_memory += len(ell)
                if in_memory > self.max_form_memory_size:
                    self.in_memory_threshold_reached(in_memory)
        elif ellt == _end:
            if is_file:
                # Rewind so the consumer can read the streamed file.
                container.seek(0)
                yield ('file', (name, FileStorage(container, filename, name, headers=headers)))
            else:
                # Decode the buffered form value using the part's charset.
                part_charset = self.get_part_charset(headers)
                yield ('form', (name, b''.join(container).decode(part_charset, self.errors)))
def get_dynamodb_type(self, val):
    """Take a scalar Python value and return a string representing
    the corresponding Amazon DynamoDB type ('N', 'S', 'NS' or 'SS').

    Raises TypeError if the value is not a supported type.
    """
    if is_num(val):
        return 'N'
    if is_str(val):
        return 'S'
    if isinstance(val, (set, frozenset)):
        # A homogeneous set of numbers or strings maps to a DynamoDB set type.
        if all(map(is_num, val)):
            return 'NS'
        if all(map(is_str, val)):
            return 'SS'
    raise TypeError('Unsupported type "%s" for value "%s"' % (type(val), val))
def list_traces(self, project_id=None, view=None, page_size=None,
                start_time=None, end_time=None, filter_=None,
                order_by=None, page_token=None,):
    """Return a list of traces that match the filter conditions.

    Args:
        project_id (Optional[str]): ID of the Cloud project where the trace
            data is stored; defaults to this client's project.
        view: type of data returned for traces (default ``MINIMAL``).
        page_size (Optional[int]): maximum number of traces to return.
        start_time (Optional[~datetime.datetime]): inclusive start of the
            collection interval.
        end_time (Optional[~datetime.datetime]): inclusive end of the
            collection interval.
        filter_ (Optional[str]): optional request filter.
        order_by (Optional[str]): field used to sort the returned traces.
        page_token (Optional[str]): opaque marker for the next "page".

    Returns:
        An iterator of traces matching the filter conditions.
    """
    if project_id is None:
        project_id = self.project
    # Convert datetimes to protobuf Timestamps where supplied.
    start_ts = None if start_time is None else _datetime_to_pb_timestamp(start_time)
    end_ts = None if end_time is None else _datetime_to_pb_timestamp(end_time)
    return self.trace_api.list_traces(
        project_id=project_id,
        view=view,
        page_size=page_size,
        start_time=start_ts,
        end_time=end_ts,
        filter_=filter_,
        order_by=order_by,
        page_token=page_token,
    )
def extractRuntime(runtime_dirs):
    """Find the correct static lib name to pass to gcc."""
    entries = []
    for directory in runtime_dirs:
        entries.extend(str(item) for item in os.listdir(directory))
    # Match the runtime library name against the combined listing.
    return extract(RUNTIME_PATTERN, '\n'.join(entries), condense=True)
def generate_key(self):
    """Ask mist.io to randomly generate a private ssh key to be used
    with the creation of a new Key.

    :returns: A string of a randomly generated ssh private key
    """
    response = self.request(self.uri + "/keys").post().json()
    return response['priv']
def get(self):
    """Return the spanning-tree configuration as a dict object.

    The dictionary represents the spanning-tree configuration derived
    from the node's running config, covering interfaces and instances.
    See the StpInterfaces and StpInstances classes for the key/value
    pair definitions.

    Returns:
        A dict with "interfaces" and "instances" keys mapping to the
        corresponding collections' ``getall()`` results.
    """
    return dict(
        interfaces=self.interfaces.getall(),
        instances=self.instances.getall(),
    )
def get_template_substitution_values(self, value):
    """Return value-related template substitutions."""
    escaped = conditional_escape(value)
    return {
        'initial': os.path.basename(escaped),
        'initial_url': conditional_escape(value.url),
    }
def requires_private_key(func):
    """Decorator for functions that require the private key to be defined.

    If the private key has not been generated yet, it is generated before
    the wrapped function runs.

    FIX: the original wrapper discarded the wrapped function's return
    value; it is now propagated. functools.wraps preserves the wrapped
    function's metadata.
    """
    import functools  # local import keeps this block self-contained

    @functools.wraps(func)
    def func_wrapper(self, *args, **kwargs):
        # Name-mangled attribute set by DiffieHellman.generate_private_key().
        if not hasattr(self, "_DiffieHellman__private_key"):
            self.generate_private_key()
        return func(self, *args, **kwargs)
    return func_wrapper
def get_container_setting(name, container, settings):
    '''Get the value of the setting for the IIS container.

    .. versionadded:: 2016.11.0

    Args:
        name (str): The name of the IIS container.
        container (str): The type of IIS container. The container types are:
            AppPools, Sites, SslBindings
        settings (dict): A dictionary of the setting names and their values.

    Returns:
        dict: A dictionary of the provided settings and their values.

    CLI Example:

    .. code-block:: bash

        salt '*' win_iis.get_container_setting name='MyTestPool' container='AppPools' settings="['processModel.identityType']"
    '''
    ret = dict()
    ps_cmd = list()
    ps_cmd_validate = list()
    container_path = r"IIS:\{0}\{1}".format(container, name)
    if not settings:
        log.warning('No settings provided')
        return ret
    # Accumulate each requested setting into a PowerShell hashtable.
    ps_cmd.append(r'$Settings = @{};')
    for setting in settings:
        # Build the commands to verify that the property names are valid.
        ps_cmd_validate.extend(['Get-ItemProperty',
                                '-Path', "'{0}'".format(container_path),
                                '-Name', "'{0}'".format(setting),
                                '-ErrorAction', 'Stop',
                                '|', 'Out-Null;'])
        # Some ItemProperties are Strings and others are ConfigurationAttributes.
        # Since the former doesn't have a Value property, we need to account
        # for this.
        ps_cmd.append("$Property = Get-ItemProperty -Path '{0}'".format(container_path))
        ps_cmd.append("-Name '{0}' -ErrorAction Stop;".format(setting))
        ps_cmd.append(r'if (([String]::IsNullOrEmpty($Property) -eq $False) -and')
        ps_cmd.append(r"($Property.GetType()).Name -eq 'ConfigurationAttribute') {")
        ps_cmd.append(r'$Property = $Property | Select-Object')
        ps_cmd.append(r'-ExpandProperty Value };')
        ps_cmd.append("$Settings['{0}'] = [String] $Property;".format(setting))
        ps_cmd.append(r'$Property = $Null;')
    # Validate the setting names that were passed in.
    cmd_ret = _srvmgr(cmd=ps_cmd_validate, return_json=True)
    if cmd_ret['retcode'] != 0:
        message = 'One or more invalid property names were specified for the provided container.'
        raise SaltInvocationError(message)
    # Emit the hashtable so the JSON output contains the collected values.
    ps_cmd.append('$Settings')
    cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)
    try:
        items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)
        # PowerShell may serialize a single hashtable as a one-element list.
        if isinstance(items, list):
            ret.update(items[0])
        else:
            ret.update(items)
    except ValueError:
        raise CommandExecutionError('Unable to parse return data as Json.')
    return ret
def _process_net_mhcii ( mhc_file , normal = False ) :
"""Process the results from running NetMHCIIpan binding predictions into a pandas dataframe .
: param str mhc _ file : Output file containing netmhciipan mhcii : peptide binding predictions
: param bool normal : Is this processing the results of a normal ?
: return : Results in a tabular format
: rtype : pandas . DataFrame""" | results = pandas . DataFrame ( columns = [ 'allele' , 'pept' , 'tumor_pred' , 'core' , 'peptide_name' ] )
with open ( mhc_file , 'r' ) as mf :
peptides = set ( )
# Get the allele from the first line and skip the second line
allele = re . sub ( '-DQB' , '/DQB' , mf . readline ( ) . strip ( ) )
_ = mf . readline ( )
for line in mf :
line = line . strip ( ) . split ( '\t' )
pept = line [ 1 ]
pred = line [ 5 ]
core = 'NOCORE'
peptide_name = line [ 2 ]
if float ( pred ) > 5.00 and not normal :
continue
results . loc [ len ( results ) ] = [ allele , pept , pred , core , peptide_name ]
results . drop_duplicates ( inplace = True )
return results |
def pause(self, container):
    """Pause all processes within a container.

    Args:
        container (str): The container to pause

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    response = self._post(self._url('/containers/{0}/pause', container))
    self._raise_for_status(response)
def server_group_field_data(request):
    """Return a list of (id, name) tuples for all available server groups.

    The first tuple is a placeholder label; when no groups exist, a single
    "No server groups available" entry is returned.

    :param request: django http request object
    :return: list of (id, name) tuples
    """
    groups = server_group_list(request)
    if not groups:
        return [("", _("No server groups available")), ]
    # Sort by display name for a stable, user-friendly ordering.
    choices = sorted(((sg.id, sg.name) for sg in groups), key=lambda pair: pair[1])
    return [("", _("Select Server Group")), ] + choices
def downzip(url, destination='./sample_data/'):
    """Download a zip archive, extract it into `destination`, then delete it."""
    logmsg = "downloading from '" + url + "'"
    print(logmsg)
    logger.debug(logmsg)
    archive_path = os.path.join(destination, 'tmp.zip')
    urllibr.urlretrieve(url, archive_path)
    # Extract everything, then remove the temporary archive.
    zipfile.ZipFile(archive_path).extractall(destination)
    remove(archive_path)
def reset(self):
    """Reset the widget, and clear the scene."""
    # start_time is the absolute start time (datetime).
    for attr in ('minimum', 'maximum', 'start_time', 'idx_current'):
        setattr(self, attr, None)
    self.idx_markers = []
    self.idx_annot = []
    if self.scene is not None:
        self.scene.clear()
        self.scene = None
def warn_quirks(message, recommend, pattern, index):
    """Warn quirks.

    Emits a QuirksWarning attributed to the first stack frame outside this
    module and bs4, so the warning points at the user's calling code.
    """
    import traceback
    import bs4  # noqa: F401
    # Acquire source code line context
    paths = (MODULE, sys.modules['bs4'].__path__[0])
    tb = traceback.extract_stack()
    previous = None
    filename = None
    lineno = None
    # Walk the stack until we enter library code; the frame just before
    # that is the user's call site.
    for entry in tb:
        if (PY35 and entry.filename.startswith(paths)) or (not PY35 and entry[0].startswith(paths)):
            break
        previous = entry
    if previous:
        filename = previous.filename if PY35 else previous[0]
        lineno = previous.lineno if PY35 else previous[1]
    # Format pattern to show line and column position
    context, line = get_pattern_context(pattern, index)[0:2]
    # Display warning
    # NOTE(review): "confrom" is a typo in the user-visible message, kept
    # verbatim here since changing runtime strings is out of scope.
    warnings.warn_explicit(
        "\nCSS selector pattern:\n" +
        " {}\n".format(message) +
        " This behavior is only allowed temporarily for Beautiful Soup's transition to Soup Sieve.\n" +
        " In order to confrom to the CSS spec, {}\n".format(recommend) +
        " It is strongly recommended the selector be altered to conform to the CSS spec " +
        "as an exception will be raised for this case in the future.\n" +
        "pattern line {}:\n{}".format(line, context),
        QuirksWarning,
        filename,
        lineno
    )
def install_module(self, install_optional=False, production_only=False,
                   force=False, frozen_lockfile=True, node_paths=None):
    """Return a command that, when executed, installs the node package.

    :param install_optional: True to install optional dependencies.
    :param production_only: True to only install production dependencies,
        i.e. ignore devDependencies.
    :param force: True to force re-download dependencies.
    :param frozen_lockfile: True to disallow automatic update of lock files.
    :param node_paths: A list of paths that should be included in $PATH when
        running installation.
    """
    install_args = self._get_installation_args(
        install_optional=install_optional,
        production_only=production_only,
        force=force,
        frozen_lockfile=frozen_lockfile)
    return self.run_command(args=install_args, node_paths=node_paths)
def get(self, request, bot_id, id, format=None):
    """Get environment variable by id.

    serializer: EnvironmentVarSerializer
    responseMessages:
        - code: 401
          message: Not authenticated
    """
    # Pure delegation to the parent detail view (Python 2 style super()).
    return super(EnvironmentVarDetail, self).get(request, bot_id, id, format)
def status(self,):
    """The global status that summarizes all actions.

    Calculated in the following order:
    any error -> :data:`ActionStatus.ERROR`; any failure ->
    :data:`ActionStatus.FAILURE`; all successful or skipped ->
    :data:`ActionStatus.SUCCESS`.

    :returns: a status object that represents a summary of all actions
    :rtype: :class:`ActionStatus`
    :raises: None
    """
    summary = ActionStatus(ActionStatus.SUCCESS, "All actions succeeded.")
    for action in self.actions:
        if action.status.value == ActionStatus.ERROR:
            # An error trumps everything else; stop scanning.
            summary = ActionStatus(
                ActionStatus.ERROR,
                "Error: action \"%s\" raised an error!" % action.name,
                action.status.traceback)
            break
        if action.status.value == ActionStatus.FAILURE:
            summary = ActionStatus(ActionStatus.FAILURE, "Action(s) failed!")
    return summary
def all_lemmas(self):
    '''A generator over all the lemmas in the GermaNet database.'''
    for record in self._mongo_db.lexunits.find():
        yield Lemma(self, record)
def to_number(result_type, value, default=None, minimum=None, maximum=None):
    """Cast `value` to numeric `result_type` if possible.

    Args:
        result_type (type): Numerical type to convert to (int, float, ...)
        value (str | unicode): Value to convert
        default: Returned when `value` can't be converted
        minimum: If specified, result can't be below this minimum
        maximum: If specified, result can't be above this maximum

    Returns:
        Corresponding numeric value, clamped to [minimum, maximum].
    """
    try:
        # Conversion and clamping both guarded: any TypeError/ValueError
        # falls back to the default.
        return capped(result_type(value), minimum, maximum)
    except (TypeError, ValueError):
        return default
def extract_variables(content):
    """Recursively extract all variables referenced in `content`."""
    if isinstance(content, (list, set, tuple)):
        found = set()
        for element in content:
            found |= extract_variables(element)
        return found
    if isinstance(content, dict):
        found = set()
        for inner in content.values():
            found |= extract_variables(inner)
        return found
    if isinstance(content, LazyString):
        return set(regex_findall_variables(content.raw_string))
    # Plain scalars contribute no variables.
    return set()
def set_index(self, schema, name, fields, **index_options):
    """Add an index to the table.

    schema -- Schema()
    name -- string -- the name of the index
    fields -- array -- the fields the index should be on
    **index_options -- dict -- any index options that might be useful to create the index
    """
    with self.transaction(**index_options) as connection:
        # Pass the open connection through to the low-level helper.
        index_options['connection'] = connection
        self._set_index(schema, name, fields, **index_options)
    return True
def coerce(value):
    """Coerce a Bool, None, or int into Bit/Propositional (``BoolCell``) form.

    Accepts an existing ``BoolCell`` unchanged, maps 1/T -> true,
    0/-1/F -> false and None/U -> unknown.

    Raises:
        CellConstructionFailure: if ``value`` is a non-boolean ``Cell``.
        CoercionFailure: if ``value`` is not recognised.
    """
    if isinstance(value, BoolCell):
        return value
    elif isinstance(value, Cell):
        raise CellConstructionFailure("Cannot convert %s to BoolCell" % type(value))
    elif value in [1, T]:
        return BoolCell(T)
    elif value in [0, -1, F]:
        return BoolCell(F)
    elif value in [None, U]:
        return BoolCell(U)
    else:
        # Bug fix: the original used %d, which itself raises TypeError when the
        # unrecognised value is not an integer, masking the real error. %r is
        # safe for any value.
        raise CoercionFailure("Don't know how to coerce %r to Bool" % (value,))
def tabbed_parsing_character_generator(tmp_dir, train):
    """Generate source and target data from a single tab-separated file."""
    split = "train" if train else "dev"
    pair_filepath = os.path.join(tmp_dir, "parsing_{0}.pairs".format(split))
    # Byte-level vocabulary; each pair line is encoded with it.
    character_vocab = text_encoder.ByteTextEncoder()
    pair_iterator = text_problems.text2text_txt_tab_iterator(pair_filepath)
    return text_problems.text2text_generate_encoded(pair_iterator, character_vocab)
def nb_to_html_cells(nb) -> list:
    """Convert a notebook to an iterable of BS4 HTML cell nodes (images inline)."""
    exporter = HTMLExporter()
    exporter.template_file = 'basic'
    body, _resources = exporter.from_notebook_node(nb)
    soup = BeautifulSoup(body, 'html.parser')
    return soup.findAll('div', class_='cell')
def filter_flags(use, use_expand_hidden, usemasked, useforced):
    '''.. versionadded:: 2015.8.0

    Filter function to remove hidden or otherwise not normally
    visible USE flags from a list.

    @type use: list
    @param use: the USE flag list to be filtered.
    @type use_expand_hidden: list
    @param use_expand_hidden: list of flags hidden.
    @type usemasked: list
    @param usemasked: list of masked USE flags.
    @type useforced: list
    @param useforced: the forced USE flags.
    @rtype: list
    @return the filtered USE flags.
    '''
    portage = _get_portage()
    # clean out some environment flags, since they will most probably
    # be confusing for the user
    for f in use_expand_hidden:
        f = f.lower() + "_"
        # Bug fix: iterate over a copy -- removing from `use` while iterating
        # it directly skips the element that follows each removal.
        for x in use[:]:
            if f in x:
                use.remove(x)
    # clean out any arch's
    archlist = portage.settings["PORTAGE_ARCHLIST"].split()
    for a in use[:]:
        if a in archlist:
            use.remove(a)
    # dbl check if any from usemasked or useforced are still there
    masked = usemasked + useforced
    for a in use[:]:
        if a in masked:
            use.remove(a)
    return use
def get_items(self, page=1, order_by=None, filters=None):
    """Fetch database for items matching.

    Args:
        page (int): which page will be sliced; slice size is ``self.per_page``.
        order_by (str): a field name to order query by.
        filters (dict): a ``filter name``: ``value`` dict.

    Returns:
        tuple with: items sliced by page * self.per_page, and the total
        item count without the slice.
    """
    query = self.get_query()
    if order_by is not None:
        query = query.order_by(self._get_field(order_by))
    if filters is not None:
        query = self._filter(query, filters)
    # Pages are 1-based; translate to a 0-based row offset.
    offset = (page - 1) * self.per_page
    return query.offset(offset).limit(self.per_page), self.count(query)
def temporal_snr ( signal_dset , noise_dset , mask = None , prefix = 'temporal_snr.nii.gz' ) :
'''Calculates temporal SNR by dividing average signal of ` ` signal _ dset ` ` by SD of ` ` noise _ dset ` ` .
` ` signal _ dset ` ` should be a dataset that contains the average signal value ( i . e . , nothing that has
been detrended by removing the mean ) , and ` ` noise _ dset ` ` should be a dataset that has all possible
known signal fluctuations ( e . g . , task - related effects ) removed from it ( the residual dataset from a
deconvolve works well )''' | for d in [ ( 'mean' , signal_dset ) , ( 'stdev' , noise_dset ) ] :
# Run AFNI 3dTstat twice: once for the signal mean, once for the noise stdev.
new_d = nl . suffix ( d [ 1 ] , '_%s' % d [ 0 ] )
cmd = [ '3dTstat' , '-%s' % d [ 0 ] , '-prefix' , new_d ]
if mask :
cmd += [ '-mask' , mask ]
cmd += [ d [ 1 ] ]
nl . run ( cmd , products = new_d )
# tSNR = mean(signal) / stdev(noise), computed voxel-wise ('a/b').
nl . calc ( [ nl . suffix ( signal_dset , '_mean' ) , nl . suffix ( noise_dset , '_stdev' ) ] , 'a/b' , prefix = prefix )
def fetch_friends ( self , user ) :
"""fetches the friends from twitter using the
information on django - social - auth models
user is an instance of UserSocialAuth
Returns :
collection of friend objects fetched from twitter""" | # Fetch the token key and secret
if USING_ALLAUTH :
# django-allauth stores the consumer and access tokens on its own models.
social_app = SocialApp . objects . get_current ( 'twitter' )
consumer_key = social_app . key
consumer_secret = social_app . secret
oauth_token = SocialToken . objects . get ( account = user , app = social_app ) . token
oauth_token_secret = SocialToken . objects . get ( account = user , app = social_app ) . token_secret
else :
# django-social-auth path: access tokens come from the backend helper.
t = TwitterBackend ( )
tokens = t . tokens ( user )
oauth_token_secret = tokens [ 'oauth_token_secret' ]
oauth_token = tokens [ 'oauth_token' ]
# Consumer key and secret from settings
consumer_key = settings . TWITTER_CONSUMER_KEY
consumer_secret = settings . TWITTER_CONSUMER_SECRET
# now fetch the twitter friends using ` python - twitter `
api = twitter . Api ( consumer_key = consumer_key , consumer_secret = consumer_secret , access_token_key = oauth_token , access_token_secret = oauth_token_secret )
return api . GetFriends ( )
def compare(string1, string2):
    """Compare two strings while protecting against timing attacks.

    :param str string1: the first string
    :param str string2: the second string
    :returns: True if the strings are equal, False if not
    :rtype: :obj:`bool`
    """
    try:
        # hmac.compare_digest is the stdlib constant-time comparison; unlike
        # the old hand-rolled loop it also avoids the Python-2-only `izip`.
        return hmac.compare_digest(string1, string2)
    except TypeError:
        # compare_digest only accepts bytes / ASCII str; fall back to the
        # manual constant-time scan for other inputs (e.g. non-ASCII text).
        if len(string1) != len(string2):
            return False
        result = True
        for c1, c2 in zip(string1, string2):
            result &= c1 == c2
        return result
def joint_entropy_calc(classes, table, POP):
    """Calculate joint entropy.

    :param classes: confusion matrix classes
    :type classes: list
    :param table: confusion matrix table
    :type table: dict
    :param POP: population
    :type POP: dict
    :return: joint entropy as float (the string "None" on any error)
    """
    try:
        entropy_sum = 0
        for actual in classes:
            for predicted in classes:
                joint_prob = table[actual][predicted] / POP[actual]
                # log2(0) is undefined; zero-probability cells contribute 0.
                if joint_prob != 0:
                    entropy_sum += joint_prob * math.log(joint_prob, 2)
        return -entropy_sum
    except Exception:
        return "None"
def _get_pygments_extensions():
    """Return all file type extensions supported by Pygments."""
    # NOTE: Leave this import here to keep startup process fast!
    import pygments.lexers as lexers
    extensions = []
    for lexer_info in lexers.get_all_lexers():
        exts = lexer_info[2]
        if not exts:
            continue
        # Extensions without a leading `*` are kept untrimmed.
        plain = [e for e in exts if not e.startswith('*')]
        # Glob-style entries get the leading `*` trimmed; drop `_*`-suffixed
        # leftovers afterwards.
        globbed = [e[1:] for e in exts if e.startswith('*')]
        globbed = [e for e in globbed if not e.endswith('_*')]
        extensions.extend(globbed)
        extensions.extend(plain)
    return sorted(set(extensions))
def CreateApproval ( self , reason = None , notified_users = None , email_cc_addresses = None , keep_client_alive = False ) :
"""Create a new approval for the current user to access this client .""" | if not reason :
raise ValueError ( "reason can't be empty" )
if not notified_users :
raise ValueError ( "notified_users list can't be empty." )
# Build the approval protobuf and send the creation request through the API context.
approval = user_pb2 . ApiClientApproval ( reason = reason , notified_users = notified_users , email_cc_addresses = email_cc_addresses or [ ] )
args = user_pb2 . ApiCreateClientApprovalArgs ( client_id = self . client_id , approval = approval , keep_client_alive = keep_client_alive )
data = self . _context . SendRequest ( "CreateClientApproval" , args )
return ClientApproval ( data = data , username = self . _context . username , context = self . _context )
def launch_subshell ( self , shell_cls , cmd , args , * , prompt = None , context = { } ) :
"""Launch a subshell .
The doc string of the cmdloop ( ) method explains how shell histories and
history files are saved and restored .
The design of the _ ShellBase class encourage launching of subshells through
the subshell ( ) decorator function . Nonetheless , the user has the option
of directly launching subshells via this method .
Arguments :
shell _ cls : The _ ShellBase class object to instantiate and launch .
args : Arguments used to launch this subshell .
prompt : The name of the subshell . The default , None , means
to use the shell _ cls . _ _ name _ _ .
context : A dictionary to pass to the subshell as its context .
Returns :
' root ' : Inform the parent shell to keep exiting until the root shell
is reached .
' all ' : Exit the the command line .
False , None , or anything that are evaluated as False : Inform the
parent shell to stay in that parent shell .
An integer indicating the depth of shell to exit to . 0 = root shell .""" | # Save history of the current shell .
readline . write_history_file ( self . history_fname )
prompt = prompt if prompt else shell_cls . __name__
mode = _ShellBase . _Mode ( shell = self , cmd = cmd , args = args , prompt = prompt , context = context , )
shell = shell_cls ( batch_mode = self . batch_mode , debug = self . debug , mode_stack = self . _mode_stack + [ mode ] , pipe_end = self . _pipe_end , root_prompt = self . root_prompt , stdout = self . stdout , stderr = self . stderr , temp_dir = self . _temp_dir , )
# The subshell creates its own history context .
self . print_debug ( "Leave parent shell '{}'" . format ( self . prompt ) )
exit_directive = shell . cmdloop ( )
self . print_debug ( "Enter parent shell '{}': {}" . format ( self . prompt , exit_directive ) )
# Restore history . The subshell could have deleted the history file of
# this shell via ' history clearall ' .
readline . clear_history ( )
if os . path . isfile ( self . history_fname ) :
readline . read_history_file ( self . history_fname )
# NOTE(review): parses as `not (exit_directive is True)`, so a literal True
# falls through and returns None -- presumably intentional, confirm.
if not exit_directive is True :
return exit_directive
def _dbg ( self , level , msg ) :
"""Write debugging output to sys . stderr .""" | if level <= self . debug :
print ( msg , file = sys . stderr ) |
def parse_docstring(whatever_has_docstring):
    '''Parse a docstring into a summary (first line) and notes (rest of it).'''
    doc = whatever_has_docstring.__doc__
    try:
        summary, notes = doc.split('\n', 1)
    except ValueError:
        # Single-line docstring: there is no notes part.
        return doc.strip(), ''
    # Remove the common indentation and fold the notes onto one line.
    return summary, dedent(notes).replace('\n', ' ')
def dayproc(st, lowcut, highcut, filt_order, samp_rate, starttime, debug=0,
            parallel=True, num_cores=False, ignore_length=False,
            seisan_chan_names=False, fill_gaps=True):
    """Wrapper to process multiple traces of a day-long stream the same way.

    Works in place on data. This is employed to ensure all parts of the data
    are processed in the same way.

    :type st: obspy.core.stream.Stream
    :param st: Stream to process (can be a Trace).
    :type lowcut: float
    :param lowcut: Low cut in Hz for bandpass.
    :type highcut: float
    :param highcut: High cut in Hz for bandpass.
    :type filt_order: int
    :param filt_order: Corners for bandpass.
    :type samp_rate: float
    :param samp_rate: Desired sampling rate in Hz.
    :type starttime: obspy.core.utcdatetime.UTCDateTime
    :param starttime: Desired start-date of trace. If None, it is inferred
        from the traces, which must then all start on the same day.
    :type debug: int
    :param debug: Debug output level from 0-5, higher numbers = more output.
    :type parallel: bool
    :param parallel: Set to True to process traces in parallel, this is often
        faster than serial processing: defaults to True.
    :type num_cores: int
    :param num_cores: Number of cores for parallel processing; if False, all
        available cores are used.
    :type ignore_length: bool
    :param ignore_length: Allow data shorter than 19.2 hours. Use at your own
        risk: padding to day-long data may create artifacts in detections.
    :type seisan_chan_names: bool
    :param seisan_chan_names: Whether channels are named like seisan channels
        (two letters rather than SEED convention of three).
    :type fill_gaps: bool
    :param fill_gaps: Whether to pad any gaps found with zeros or not.
    :return: Processed stream (a single trace if a Trace was passed in).
    :rtype: :class:`obspy.core.stream.Stream`

    .. note::
        If your data contain gaps you should *NOT* fill those gaps before
        using this function: gaps are filled internally prior to processing
        and re-filled with zeros afterwards, so correlations are not
        incorrectly calculated within gaps. Pass a merged stream without the
        `fill_value` argument (e.g. `st = st.merge()`).

    .. warning::
        Will fail if data are less than 19.2 hours long unless
        `ignore_length=True`, and will warn any time it has to pad data.
    """
    if isinstance(st, Trace):
        st = Stream(st)
        tracein = True
    else:
        tracein = False
    # Sanity check for the filter band.
    if highcut and highcut >= 0.5 * samp_rate:
        raise IOError('Highcut must be lower than the nyquist')
    if debug > 4:
        parallel = False
    # Set the start-time to a day start when none was given.
    if starttime is None:
        startdates = []
        for tr in st:
            if abs(tr.stats.starttime - (UTCDateTime(tr.stats.starttime.date) + 86400)) < tr.stats.delta:
                # Trace starts within 1 sample of the next day: use the next
                # day as the startdate.
                startdates.append((tr.stats.starttime + 86400).date)
                debug_print('{0} starts within 1 sample of the next day, using this '
                            'time {1}'.format(tr.id, (tr.stats.starttime + 86400).date), 2, debug)
            else:
                startdates.append(tr.stats.starttime.date)
        # Check that all traces start on the same date...
        if not len(set(startdates)) == 1:
            raise NotImplementedError('Traces start on different days')
        starttime = UTCDateTime(startdates[0])
    if parallel:
        if not num_cores:
            num_cores = cpu_count()
        if num_cores > len(st):
            num_cores = len(st)
        pool = Pool(processes=num_cores)
        results = [pool.apply_async(process, (tr,), {
            'lowcut': lowcut, 'highcut': highcut, 'filt_order': filt_order,
            'samp_rate': samp_rate, 'debug': debug, 'starttime': starttime,
            'clip': True, 'ignore_length': ignore_length, 'length': 86400,
            'seisan_chan_names': seisan_chan_names, 'fill_gaps': fill_gaps})
            for tr in st]
        pool.close()
        try:
            stream_list = [p.get() for p in results]
        except KeyboardInterrupt as e:  # pragma: no cover
            pool.terminate()
            raise e
        pool.join()
        st = Stream(stream_list)
    else:
        for i, tr in enumerate(st):
            st[i] = process(
                tr=tr, lowcut=lowcut, highcut=highcut, filt_order=filt_order,
                samp_rate=samp_rate, debug=debug, starttime=starttime,
                clip=True, length=86400, ignore_length=ignore_length,
                seisan_chan_names=seisan_chan_names, fill_gaps=fill_gaps)
    # Bug fix: collect empty traces first, then remove them. The original
    # removed traces while iterating the stream, which skips the trace that
    # follows each removal.
    empty_traces = [tr for tr in st if len(tr.data) == 0]
    for tr in empty_traces:
        st.remove(tr)
    if tracein:
        st.merge()
        return st[0]
    return st
def _schedule_processing_blocks ( self ) :
"""Schedule Processing Blocks for execution .""" | LOG . info ( 'Starting to Schedule Processing Blocks.' )
# Daemon loop: poll the queue twice a second, dispatching when allowed.
while True :
time . sleep ( 0.5 )
if not self . _queue :
continue
if self . _num_pbcs >= self . _max_pbcs :
LOG . warning ( 'Resource limit reached!' )
continue
# Only dispatch while Celery workers are reachable (Inspect.active()).
_inspect = Inspect ( app = APP )
if self . _queue and _inspect . active ( ) is not None :
# NOTE(review): peeks with [-1] but drains with .get(); presumably
# self._queue is a custom queue type supporting both -- confirm.
next_pb = self . _queue [ - 1 ]
LOG . info ( 'Considering %s for execution...' , next_pb [ 2 ] )
utc_now = datetime . datetime . utcnow ( )
time_in_queue = ( utc_now - datetime_from_isoformat ( next_pb [ 4 ] ) )
# Only items that have waited at least 10 seconds are executed.
if time_in_queue . total_seconds ( ) >= 10 :
item = self . _queue . get ( )
LOG . info ( '------------------------------------' )
LOG . info ( '>>> Executing %s! <<<' , item )
LOG . info ( '------------------------------------' )
execute_processing_block . delay ( item )
self . _num_pbcs += 1
else :
LOG . info ( 'Waiting for resources for %s' , next_pb [ 2 ] )
def cov(self, ddof=None, bias=0):
    '''The covariance matrix from the aggregate sample.

    :parameter ddof: If not ``None`` normalization is by (N - ddof), where N
        is the number of observations; this overrides the value implied by
        bias. The default value is None.
    '''
    N = self.n
    # ddof, when given, overrides the normalisation implied by `bias`.
    if ddof is not None:
        denom = N - ddof
    elif bias:
        denom = N
    else:
        denom = N - 1
    return (self.sxx - outer(self.sx, self.sx) / N) / denom
def grouper(n, iterable, fillvalue=None):
    """Collect data into fixed-length chunks, padding the last with fillvalue.

    grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx
    """
    args = [iter(iterable)] * n
    # Bug fix: itertools.izip_longest was renamed zip_longest in Python 3;
    # resolve whichever exists so the helper works on both.
    zip_longest = getattr(itertools, 'zip_longest', None) or itertools.izip_longest
    return zip_longest(fillvalue=fillvalue, *args)
def get_resources ( self , types = None , names = None , languages = None ) :
"""Get resources .
types = a list of resource types to search for ( None = all )
names = a list of resource names to search for ( None = all )
languages = a list of resource languages to search for ( None = all )
Return a dict of the form { type _ : { name : { language : data } } } which
might also be empty if no matching resources were found .""" | # Thin wrapper: all filtering is delegated to the module-level GetResources.
return GetResources ( self . filename , types , names , languages )
def regex_findall_variables(content):
    """extract all variable names from content, which is in format $variable

    Args:
        content (str): string content

    Returns:
        list: variables list extracted from string content

    Examples:
        >>> regex_findall_variables("$variable")
        ["variable"]
        >>> regex_findall_variables("/blog/$postid")
        ["postid"]
        >>> regex_findall_variables("/$var1/$var2")
        ["var1", "var2"]
        >>> regex_findall_variables("abc")
        []
    """
    try:
        matches = variable_regex_compile.findall(content)
    except TypeError:
        # Non-string content cannot be scanned.
        return []
    # Each match is a tuple from two alternative capture groups; exactly one
    # of the first two entries is non-empty.
    return [m[0] or m[1] for m in matches]
def increment(self):
    """Advance to the next permutation, odometer-style (deepest level first)."""
    # Increment positions starting from the deepest place of the tree.
    for level in reversed(range(self.depth)):
        self.indexes[level] += 1
        if self.indexes[level] < self.range_size:
            # No overflow at this level: upper levels stay untouched.
            break
        # End of board reached: reset this level and carry into the one above.
        self.indexes[level] = 0
    # Deduplicate positions sharing the same UIDs by aligning each piece's
    # index to its predecessor's. This works thanks to the sort performed on
    # self.pieces at initialization. See #7.
    for level in range(self.depth - 1):
        same_piece = self.pieces[level] == self.pieces[level + 1]
        if same_piece and self.indexes[level] > self.indexes[level + 1]:
            self.indexes[level + 1] = self.indexes[level]
def addParts ( parentPart , childPath , count , index ) :
"""BUILD A hierarchy BY REPEATEDLY CALLING self METHOD WITH VARIOUS childPaths
count IS THE NUMBER FOUND FOR self PATH""" | if index == None :
# NOTE(review): `== None` (not `is None`) may be deliberate if these values
# can be a Null-like type (coalesce/FlatList suggest mo-dot) -- confirm
# before "fixing" to an identity check.
index = 0
if index == len ( childPath ) :
return
c = childPath [ index ]
# Accumulate the count on every ancestor along the path.
parentPart . count = coalesce ( parentPart . count , 0 ) + count
if parentPart . partitions == None :
parentPart . partitions = FlatList ( )
for i , part in enumerate ( parentPart . partitions ) :
if part . name == c . name :
# Existing child with the same name: descend into it.
addParts ( part , childPath , count , index + 1 )
return
# No matching child: adopt this path node and continue downward.
parentPart . partitions . append ( c )
addParts ( c , childPath , count , index + 1 )
def from_dynacRepr(cls, pynacRepr):
    """Construct a ``Set4DAperture`` instance from the Pynac lattice element."""
    # The numeric fields all live in the first row of the element's data table.
    row = pynacRepr[1][0]
    energyDefnFlag = int(row[0])
    energy, phase, x, y, radius = (float(v) for v in row[1:6])
    return cls(energy, phase, x, y, radius, energyDefnFlag)
def run ( self ) :
"""Load table data to : class : ` EuroStatsValue ` objects""" | # - - start documentation include : eurostats - run - 1
# create a new indicator metadata object
indicator = models . EuroStatIndicator ( number = self . number , description = self . description , url = "http://ec.europa.eu/eurostat/web/products-datasets/-/tgs" + self . number )
# add / commit to get the object ID filled
self . session . add ( indicator )
self . session . commit ( )
# - - end documentation include : eurostats - run - 1
# - - start documentation include : eurostats - run - 2
# load data from input file task
df = next ( self . requires ( ) ) . load ( key_filter = self . key_filter , header_preproc = self . header_preproc )
# Transform data : DataFrame from loading has NUTS2 key and years as columns .
# Index by key , then stack years as second level of index . Reset the index
# to get year and key as regular columns , with one value column left .
values = df . set_index ( 'key' ) . stack ( )
values . index . levels [ 1 ] . name = 'year'
values . name = 'value'
df = values . reset_index ( )
# - - end documentation include : eurostats - run - 2
# - - start documentation include : eurostats - run - 3
# get current max ID for EuroStatValue objects , for manual ID generation
max_id = models . EuroStatValue . get_max_id ( self . session )
# append an ID column , starting with the current max ID of the object class plus one
df [ 'id' ] = list ( range ( max_id + 1 , max_id + 1 + len ( df ) ) )
# - - end documentation include : eurostats - run - 3
# - - start documentation include : eurostats - run - 4
# append indicator ID ( constant )
df [ 'indicator_id' ] = indicator . id
# append region ID column , by mapping NUTS2 region keys to DB object IDs
regions = self . client . df_query ( self . session . query ( models . NUTS2Region ) ) . set_index ( 'key' ) [ 'id' ]
df [ 'region_id' ] = df [ 'key' ] . map ( regions )
# drop columns that are not part of the data model
df = df . drop ( [ 'key' ] , axis = 1 )
# type : pd . DataFrame
# - - end documentation include : eurostats - run - 4
# - - start documentation include : eurostats - run - 5
# store , done
# NOTE(review): uses the module-level `client` here but `self.client` above;
# looks inconsistent -- confirm which engine is intended.
df . to_sql ( name = models . EuroStatValue . __tablename__ , con = client . get_client ( ) . engine , if_exists = 'append' , index = False )
self . done ( )
def gaussian_tuple_prior_for_arguments(self, arguments):
    """Build a new TuplePrior whose priors are replaced per ``arguments``.

    Parameters
    ----------
    arguments: {Prior: float}
        A dictionary of arguments

    Returns
    -------
    tuple_prior: TuplePrior
        A new tuple prior with gaussian priors
    """
    result = TuplePrior()
    # Copy every named prior across, substituting the mapped argument value.
    for prior_tuple in self.prior_tuples:
        setattr(result, prior_tuple.name, arguments[prior_tuple.prior])
    return result
def _setint ( self , int_ , length = None ) :
"""Reset the bitstring to have given signed int interpretation .""" | # If no length given , and we ' ve previously been given a length , use it .
if length is None and hasattr ( self , 'len' ) and self . len != 0 :
length = self . len
if length is None or length == 0 :
raise CreationError ( "A non-zero length must be specified with an int initialiser." )
# Range check: a signed int of `length` bits spans [-(2**(length-1)), 2**(length-1) - 1].
if int_ >= ( 1 << ( length - 1 ) ) or int_ < - ( 1 << ( length - 1 ) ) :
# NOTE(review): the format args are passed to CreationError rather than
# str.format; presumably the exception class formats them -- confirm.
raise CreationError ( "{0} is too large a signed integer for a bitstring of length {1}. " "The allowed range is [{2}, {3}]." , int_ , length , - ( 1 << ( length - 1 ) ) , ( 1 << ( length - 1 ) ) - 1 )
if int_ >= 0 :
self . _setuint ( int_ , length )
return
# TODO : We should decide whether to just use the _ setuint , or to do the bit flipping ,
# based upon which will be quicker . If the - ive number is less than half the maximum
# possible then it ' s probably quicker to do the bit flipping . . .
# Do the 2 ' s complement thing . Add one , set to minus number , then flip bits .
int_ += 1
self . _setuint ( - int_ , length )
self . _invert_all ( )
def url_decode_stream ( stream , charset = 'utf-8' , decode_keys = False , include_empty = True , errors = 'replace' , separator = '&' , cls = None , limit = None , return_iterator = False ) :
"""Works like : func : ` url _ decode ` but decodes a stream . The behavior
of stream and limit follows functions like
: func : ` ~ werkzeug . wsgi . make _ line _ iter ` . The generator of pairs is
directly fed to the ` cls ` so you can consume the data while it ' s
parsed .
. . versionadded : : 0.8
: param stream : a stream with the encoded querystring
: param charset : the charset of the query string .
: param decode _ keys : set to ` True ` if you want the keys to be decoded
as well .
: param include _ empty : Set to ` False ` if you don ' t want empty values to
appear in the dict .
: param errors : the decoding error behavior .
: param separator : the pair separator to be used , defaults to ` ` & ` `
: param cls : an optional dict class to use . If this is not specified
or ` None ` the default : class : ` MultiDict ` is used .
: param limit : the content length of the URL data . Not necessary if
a limited stream is provided .
: param return _ iterator : if set to ` True ` the ` cls ` argument is ignored
and an iterator over all decoded pairs is
returned""" | if return_iterator :
# Identity "container": the decoded pair iterator is handed back as-is.
cls = lambda x : x
elif cls is None :
cls = MultiDict
pair_iter = make_chunk_iter ( stream , separator , limit )
return cls ( _url_decode_impl ( pair_iter , charset , decode_keys , include_empty , errors ) )
def build_vcf_inversion(x1, x2, genome_2bit):
    """Provide representation of inversion from BedPE breakpoints.

    :param x1: first BedPE record (attributes chrom1, name, start1/end1, start2/end2)
    :param x2: second BedPE record (same attributes)
    :param genome_2bit: mapping of chromosome name to a sliceable sequence object
    :return: a VcfLine describing the <INV> structural variant
    """
    id1 = "hydra{0}".format(x1.name)
    start_coords = sorted([x1.start1, x1.end1, x2.start1, x2.end1])
    # Bug fix: the original collected x2.start2 twice and never x2.end2,
    # skewing the confidence interval around the end breakpoint.
    end_coords = sorted([x1.start2, x1.end2, x2.start2, x2.end2])
    # Midpoint of the two inner coordinates on each side.
    start_pos = (start_coords[1] + start_coords[2]) // 2
    end_pos = (end_coords[1] + end_coords[2]) // 2
    base1 = genome_2bit[x1.chrom1].get(start_pos, start_pos + 1).upper()
    info = ("SVTYPE=INV;IMPRECISE;CIPOS={cip1},{cip2};CIEND={cie1},{cie2};"
            "END={end};SVLEN={length}").format(
        cip1=start_pos - start_coords[0], cip2=start_coords[-1] - start_pos,
        cie1=end_pos - end_coords[0], cie2=end_coords[-1] - end_pos,
        end=end_pos, length=end_pos - start_pos)
    return VcfLine(x1.chrom1, start_pos, id1, base1, "<INV>", info)
def create_cfg_segment(filename, filecontent, description, auth, url):
    """Create a configuration segment (partial configuration file) in IMC.

    :param filename: str containing the name of the configuration segment.
    :param filecontent: str containing the entire contents of the segment.
    :param description: str containing the description of the segment.
    :param auth: requests auth object #usually auth.creds from pyhpeimc.auth class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth class
    :return: the HTTP status code (201 on success), or an error string if the
        request itself failed.

    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.icc import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> create_cfg_segment('CW7SNMP.cfg', 'sample file content', 'My New Template', auth.creds, auth.url)
    """
    payload = {"confFileName": filename,
               "confFileType": "2",
               "cfgFileParent": "-1",
               "confFileDesc": description,
               "content": filecontent}
    f_url = url + "/imcrs/icc/confFile"
    # Bug fix: the POST must live inside the try -- requests.post is what
    # raises RequestException, so the original handler was dead code. Also
    # replaced `is not 201` (identity test on an int, implementation-
    # dependent) with a plain value comparison.
    try:
        response = requests.post(f_url, data=json.dumps(payload), auth=auth, headers=HEADERS)
        if response.status_code == 201:
            print("Template successfully created")
        return response.status_code
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " create_cfg_segment: An Error has occured"
def validate ( self , document ) :
"""Check input for Python syntax errors .""" | # When the input starts with Ctrl - Z , always accept . This means EOF in a
# Python REPL .
if document . text . startswith ( '\x1a' ) :
return
try :
# Compiler flags (e.g. __future__ features) can change what parses.
if self . get_compiler_flags :
flags = self . get_compiler_flags ( )
else :
flags = 0
compile ( document . text , '<input>' , 'exec' , flags = flags , dont_inherit = True )
except SyntaxError as e : # Note , the ' or 1 ' for offset is required because Python 2.7
# gives ` None ` as offset in case of ' 4 = 4 ' as input . ( Looks like
# fixed in Python 3 . )
index = document . translate_row_col_to_index ( e . lineno - 1 , ( e . offset or 1 ) - 1 )
raise ValidationError ( index , 'Syntax Error' )
except TypeError as e : # e . g . " compile ( ) expected string without null bytes "
raise ValidationError ( 0 , str ( e ) )
except ValueError as e : # In Python 2 , compiling " \ x9 " ( an invalid escape sequence ) raises
# ValueError instead of SyntaxError .
raise ValidationError ( 0 , 'Syntax Error: %s' % e )
def clean(text, cls=None, **kwargs):
    """Public facing function to clean ``text`` using the scrubber ``cls`` by
    replacing all personal information with ``{{PLACEHOLDERS}}``."""
    # Default to the module's Scrubber when no class is supplied.
    scrubber_cls = cls if cls else Scrubber
    return scrubber_cls().clean(text, **kwargs)
def _intersection_with_dsis(self, dsis):
    """Intersection with another :class:`DiscreteStridedIntervalSet`.

    :param dsis: The other operand.
    :return: The normalized intersection set, or an empty StridedInterval
        when nothing intersects.
    """
    intersections = set()
    for si in dsis._si_set:
        partial = self._intersection_with_si(si)
        if isinstance(partial, StridedInterval):
            # A plain interval: keep it unless empty.
            if not partial.is_empty:
                intersections.add(partial)
        else:
            # partial is a DiscreteStridedIntervalSet: merge its members.
            intersections |= partial._si_set
    if not intersections:
        return StridedInterval.empty(self.bits)
    result = DiscreteStridedIntervalSet(bits=self.bits, si_set=intersections)
    return result.normalize()
def _add_months ( self , date , months ) :
"""Add ` ` months ` ` months to ` ` date ` ` .
Unfortunately we can ' t use timedeltas to add months because timedelta counts in days
and there ' s no foolproof way to add N months in days without counting the number of
days per month .""" | year = date . year + ( date . month + months - 1 ) // 12
month = ( date . month + months - 1 ) % 12 + 1
return datetime . date ( year = year , month = month , day = 1 ) |
def get_text_for_html(html_content):
    '''Take the HTML content (from, for example, an email)
    and construct a simple plain text version of that content
    (for example, for inclusion in a multipart email message).

    :param html_content: HTML markup as a string (or bytes).
    :returns: plain-text rendering with links shown as "text <href>".
    '''
    # Name the parser explicitly: BeautifulSoup(html) picks whichever parser
    # is installed, which is nondeterministic across machines and emits a
    # GuessedAtParserWarning on bs4 >= 4.4.
    soup = BeautifulSoup(html_content, 'html.parser')
    # kill all script and style elements
    for script in soup(["script", "style"]):
        script.extract()  # rip it out
    # Replace all links with HREF with the link text and the href in brackets
    for a in soup.findAll('a', href=True):
        a.replaceWith('%s <%s>' % (a.string, a.get('href')))
    # get text
    text = soup.get_text()
    # break into lines and remove leading and trailing space on each
    lines = (line.strip() for line in text.splitlines())
    # break multi-headlines into a line each (split on runs of two spaces)
    chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
    # drop blank lines
    text = '\n'.join(chunk for chunk in chunks if chunk)
    return text
def get(self, url: StrOrURL, *, allow_redirects: bool = True, **kwargs: Any) -> '_RequestContextManager':
    """Perform HTTP GET request."""
    # Build the request coroutine first, then wrap it so it can be awaited
    # or used as an async context manager.
    request_coro = self._request(hdrs.METH_GET, url, allow_redirects=allow_redirects, **kwargs)
    return _RequestContextManager(request_coro)
def insertBefore(self, node: AbstractNode, ref_node: AbstractNode) -> AbstractNode:
    """Insert a node just before the reference node.

    Delegates to the internal ``_insert_before`` helper and returns its result.
    """
    inserted = self._insert_before(node, ref_node)
    return inserted
def imageFields(self):
    """Returns field names of image columns.

    :return: a list of field names.

    .. versionadded:: 2.3.0
    """
    # Lazily fetched once from the JVM-side ImageSchema and cached for
    # subsequent calls.
    if self._imageFields is None:
        ctx = SparkContext._active_spark_context
        self._imageFields = list(ctx._jvm.org.apache.spark.ml.image.ImageSchema.imageFields())
    return self._imageFields
def nfa_word_acceptance(nfa: dict, word: list) -> bool:
    """Checks if a given word is accepted by a NFA.

    The word w is accepted by a NFA if there exists at least one
    accepting run on w.  The NFA is simulated by tracking the full set of
    states reachable after consuming each symbol.

    :param dict nfa: input NFA;
    :param list word: list of symbols in nfa['alphabet'];
    :return: *(bool)*, True if the word is accepted, False otherwise.
    """
    reachable = set(nfa['initial_states'])
    for symbol in word:
        # Collect every state reachable via `symbol` from the current set.
        successors = set()
        for state in reachable:
            successors.update(nfa['transitions'].get((state, symbol), ()))
        if not successors:
            # Dead end: no run can consume the remaining input.
            return False
        reachable = successors
    # Accepted iff some reachable state is accepting.
    return bool(reachable.intersection(nfa['accepting_states']))
def control_circuit_breakers(self, mode=None):
    """Opens or closes all circuit breakers of all MV grids.

    Args
        mode: str
            Set mode='open' to open, mode='close' to close

    Raises ValueError on the first grid district when ``mode`` is neither
    'open' nor 'close' (so no error is raised when there are no districts).
    """
    for district in self.mv_grid_districts():
        grid = district.mv_grid
        if mode == 'open':
            grid.open_circuit_breakers()
        elif mode == 'close':
            grid.close_circuit_breakers()
        else:
            raise ValueError('\'mode\' is invalid.')
    if mode == 'open':
        logger.info('=====> MV Circuit Breakers opened')
    elif mode == 'close':
        logger.info('=====> MV Circuit Breakers closed')
def extract_body(mail, types=None, field_key='copiousoutput'):
    """Returns a string view of a Message.

    If the `types` argument is set then any encoding types there will be used
    as the prefered encoding to extract.  If `types` is None then
    :ref:`prefer_plaintext <prefer-plaintext>` will be consulted; if it is True
    then text/plain parts will be returned, if it is false then text/html will
    be returned if present or text/plain if there are no text/html parts.

    :param mail: the mail to use
    :type mail: :class:`email.Message`
    :param types: mime content types to use for body string
    :type types: list[str]
    :returns: The combined text of any parts to be used
    :rtype: str
    """
    # NOTE(review): ``field_key`` is unused in this body — confirm whether it
    # was meant to be forwarded to render_part.
    preferred = 'text/plain' if settings.get('prefer_plaintext') else 'text/html'
    has_preferred = False
    # see if the mail has our preferred type
    if types is None:
        has_preferred = list(typed_subpart_iterator(mail, *preferred.split('/')))
    body_parts = []
    for part in mail.walk():
        # skip non-leaf nodes in the mail tree
        if part.is_multipart():
            continue
        ctype = part.get_content_type()
        if types is not None:
            if ctype not in types:
                continue
        cd = part.get('Content-Disposition', '')
        if cd.startswith('attachment'):
            continue
        # if the mail has our preferred type, we only keep this type
        # note that if types != None, has_preferred always stays False
        if has_preferred and ctype != preferred:
            continue
        if ctype == 'text/plain':
            # Decode the content-transfer-encoding before sanitising.
            body_parts.append(string_sanitize(remove_cte(part, as_string=True)))
        else:
            rendered_payload = render_part(part)
            if rendered_payload:
                # handler had output
                body_parts.append(string_sanitize(rendered_payload))
            # mark as attachment
            elif cd:
                part.replace_header('Content-Disposition', 'attachment; ' + cd)
            else:
                part.add_header('Content-Disposition', 'attachment;')
    return u'\n\n'.join(body_parts)
def Send(self, url, opname, pyobj, nsdict={}, soapaction=None, chain=None, **kw):
    """Returns a ProcessingChain which needs to be passed to Receive if
    Send is being called consecutively.

    NOTE(review): Python 2 only (uses ``print >>``).  ``opname`` is unused
    in this body, and ``nsdict`` is a mutable default argument — presumably
    never mutated here, but worth confirming.
    """
    url = url or self.url
    cookies = None
    # Reuse the previous chain's cookies when chaining consecutive calls.
    if chain is not None:
        cookies = chain.flow.cookies
    d = {}
    d.update(self.nsdict)
    d.update(nsdict)
    if soapaction is not None:
        self.addHTTPHeader('SOAPAction', soapaction)
    chain = self.factory.newInstance()
    soapdata = chain.processRequest(pyobj, nsdict=nsdict, soapaction=soapaction, **kw)
    if self.trace:
        print >> self.trace, "_" * 33, time.ctime(time.time()), "REQUEST:"
        print >> self.trace, soapdata
    # Twisted getPage POSTs the serialized SOAP envelope.
    f = getPage(str(url), contextFactory=self.contextFactory, postdata=soapdata, agent=self.agent, method='POST', headers=self.getHTTPHeaders(), cookies=cookies)
    if isinstance(f, Failure):
        return f
    chain.flow = f
    self.chain = chain
    return chain
def validate(cls, job_config):
    """Validates relevant parameters.

    This method can validate fields which it deems relevant.

    Args:
      job_config: an instance of map_job.JobConfig.

    Raises:
      errors.BadReaderParamsError: required parameters are missing or invalid.
    """
    expected, actual = cls, job_config.input_reader_cls
    if actual != expected:
        raise errors.BadReaderParamsError("Expect input reader class %r, got %r." % (expected, actual))
def sign(self, encoded):
    """Return authentication signature of encoded bytes"""
    # Work on a copy so the stored HMAC state is never consumed.
    mac = self._hmac.copy()
    mac.update(encoded)
    return mac.hexdigest().encode('utf-8')
def mavlink_packet ( self , m ) :
'''handle REMOTE _ LOG _ DATA _ BLOCK packets''' | now = time . time ( )
if m . get_type ( ) == 'REMOTE_LOG_DATA_BLOCK' :
if self . stopped : # send a stop packet every second until the other end gets the idea :
if now - self . time_last_stop_packet_sent > 1 :
if self . log_settings . verbose :
print ( "DFLogger: Sending stop packet" )
self . master . mav . remote_log_block_status_send ( mavutil . mavlink . MAV_REMOTE_LOG_DATA_BLOCK_STOP , 1 )
return
# if random . random ( ) < 0.1 : # drop 1 packet in 10
# return
if not self . new_log_started :
if self . log_settings . verbose :
print ( "DFLogger: Received data packet - starting new log" )
self . start_new_log ( )
self . new_log_started = True
if self . new_log_started == True :
size = m . block_size
data = '' . join ( str ( chr ( x ) ) for x in m . data [ : size ] )
ofs = size * ( m . block_cnt )
self . logfile . seek ( ofs )
self . logfile . write ( data )
if m . block_cnt in self . missing_blocks :
if self . log_settings . verbose :
print ( "DFLogger: Received missing block: %d" % ( m . block_cnt , ) )
del self . missing_blocks [ m . block_cnt ]
self . missing_found += 1
self . blocks_to_ack_and_nack . append ( [ self . master , m . block_cnt , 1 , now , None ] )
self . acking_blocks [ m . block_cnt ] = 1
# print ( " DFLogger : missing blocks : % s " % ( str ( self . missing _ blocks ) , ) )
else : # ACK the block we just got :
if m . block_cnt in self . acking_blocks : # already acking this one ; we probably sent
# multiple nacks and received this one
# multiple times
pass
else :
self . blocks_to_ack_and_nack . append ( [ self . master , m . block_cnt , 1 , now , None ] )
self . acking_blocks [ m . block_cnt ] = 1
# NACK any blocks we haven ' t seen and should have :
if ( m . block_cnt - self . block_cnt > 1 ) :
for block in range ( self . block_cnt + 1 , m . block_cnt ) :
if block not in self . missing_blocks and block not in self . acking_blocks :
self . missing_blocks [ block ] = 1
if self . log_settings . verbose :
print ( "DFLogger: setting %d for nacking" % ( block , ) )
self . blocks_to_ack_and_nack . append ( [ self . master , block , 0 , now , None ] )
# print " \ nmissed blocks : " , self . missing _ blocks
if self . block_cnt < m . block_cnt :
self . block_cnt = m . block_cnt
self . download += size
elif not self . new_log_started and not self . stopped : # send a start packet every second until the other end gets the idea :
if now - self . time_last_start_packet_sent > 1 :
if self . log_settings . verbose :
print ( "DFLogger: Sending start packet" )
self . master . mav . remote_log_block_status_send ( mavutil . mavlink . MAV_REMOTE_LOG_DATA_BLOCK_START , 1 )
self . time_last_start_packet_sent = now |
def to_long_format(df, duration_col):
    """This function converts a survival analysis DataFrame to a lifelines
    "long" format.  The lifelines "long" format is used in a common next
    function, ``add_covariate_to_timeline``.

    Parameters
    ----------
    df : DataFrame
        a DataFrame in the standard survival analysis form (one row per
        observation, with covariates, duration and event flag)
    duration_col : string
        string representing the column in df that represents the durations
        of each subject.

    Returns
    -------
    long_form_df : DataFrame
        A DataFrame with new columns. This can be fed into
        ``add_covariate_to_timeline``

    See Also
    --------
    to_episodic_format
    add_covariate_to_timeline
    """
    # Every subject's interval starts at 0 and stops at its duration; the
    # original duration column is then dropped in favour of start/stop.
    long_form_df = df.assign(start=0, stop=lambda s: s[duration_col])
    return long_form_df.drop(duration_col, axis=1)
def _initialize(self, con):
    """Set up tables in SQL.

    Idempotent: returns immediately once the schema has been created for
    this object.  Delegates base initialisation to SQLite3Database, then
    creates this module's tables only when the "reaction" table is absent.
    """
    if self.initialized:
        return
    SQLite3Database()._initialize(con)
    # ASE db initialization
    cur = con.execute('SELECT COUNT(*) FROM sqlite_master WHERE name="reaction"')
    if cur.fetchone()[0] == 0:
        # no reaction table yet — create the full schema
        for init_command in init_commands:
            con.execute(init_command)
        # Create tables
        con.commit()
    self.initialized = True
def south_field_triple ( self ) :
"Returns a suitable description of this field for South ." | args , kwargs = introspector ( self )
kwargs . update ( { 'populate_from' : 'None' if callable ( self . populate_from ) else repr ( self . populate_from ) , 'unique_with' : repr ( self . unique_with ) } )
return ( 'autoslug.fields.AutoSlugField' , args , kwargs ) |
def stop ( self ) :
"""停止引擎""" | # 将引擎设为停止
self . __active = False
# 停止计时器
self . __timer . stop ( )
# 等待事件处理线程退出
self . __thread . join ( ) |
def rank(self, **kwargs):
    """Computes numerical rank along axis. Equal values are set to the average.

    Returns:
        DataManager containing the ranks of the values along an axis.
    """
    axis = kwargs.get("axis", 0)
    # Ranking across columns (axis=1) only makes sense over numeric data.
    numeric_only = True if axis else kwargs.get("numeric_only", False)
    func = self._prepare_method(pandas.DataFrame.rank, **kwargs)
    new_data = self._map_across_full_axis(axis, func)
    # Since we assume no knowledge of internal state, we get the columns
    # from the internal partitions.
    if numeric_only:
        new_columns = self.compute_index(1, new_data, True)
    else:
        new_columns = self.columns
    # pandas.DataFrame.rank always yields float results.
    new_dtypes = pandas.Series([np.float64 for _ in new_columns], index=new_columns)
    return self.__constructor__(new_data, self.index, new_columns, new_dtypes)
def _get_name ( self ) :
"""Returns the name , which is generated if it has not been already .""" | if self . _name is None :
self . _name = self . _generate_name ( )
return self . _name |
def show_fabric_trunk_info_output_show_trunk_list_trunk_list_groups_trunk_list_group ( self , ** kwargs ) :
"""Auto Generated Code""" | config = ET . Element ( "config" )
show_fabric_trunk_info = ET . Element ( "show_fabric_trunk_info" )
config = show_fabric_trunk_info
output = ET . SubElement ( show_fabric_trunk_info , "output" )
show_trunk_list = ET . SubElement ( output , "show-trunk-list" )
trunk_list_groups = ET . SubElement ( show_trunk_list , "trunk-list-groups" )
trunk_list_group = ET . SubElement ( trunk_list_groups , "trunk-list-group" )
trunk_list_group . text = kwargs . pop ( 'trunk_list_group' )
callback = kwargs . pop ( 'callback' , self . _callback )
return callback ( config ) |
def FastaIter(handle):
    """generator that returns (header, sequence) tuples from an open FASTA
    file handle.

    Lines before the start of the first record are ignored.
    """
    header = None
    seq_chunks = []
    for raw_line in handle:
        if raw_line.startswith(">"):
            if header is not None:
                # Emit the record that just finished.
                yield header, "".join(seq_chunks)
            header = raw_line[1:].rstrip()
            seq_chunks = []
        elif header is not None:
            # Accumulate sequence lines belonging to the current record.
            seq_chunks.append(raw_line.strip())
    if header is not None:
        yield header, "".join(seq_chunks)
def fetchone ( table , cols = "*" , where = ( ) , group = "" , order = ( ) , limit = ( ) , ** kwargs ) :
"""Convenience wrapper for database SELECT and fetch one .""" | return select ( table , cols , where , group , order , limit , ** kwargs ) . fetchone ( ) |
def get_a_manager(threadPool_settings=None):
    """On first call, creates and returns a @mirte.core.Manager.  On
    subsequent calls, returns the previously created instance.

    If it is the first call, it will initialize the threadPool
    with @threadPool_settings.
    """
    global __singleton_manager
    if __singleton_manager is None:
        def _thread_entry():
            # Name the thread for easier debugging, when prctl is available.
            if prctl:
                prctl.set_name('mirte manager')
            m.run()
            l.info('manager.run() returned')
        l = logging.getLogger('mirte.get_a_manager')
        l.info("Creating new instance")
        m = Manager(logging.getLogger('mirte'))
        if threadPool_settings:
            m.update_instance('threadPool', threadPool_settings)
        # Run the manager on a background thread and block until it is up.
        threading.Thread(target=_thread_entry).start()
        m.running_event.wait()
        __singleton_manager = m
    return __singleton_manager
def discover(self, exclude=None):
    """Automatically discovers and registers installed formats.

    If a format is already registered with an exact same name, the
    discovered format will not be registered.

    :param exclude: (optional) Exclude formats from registering
    """
    # Normalise ``exclude`` to a list so membership tests work uniformly.
    if exclude is None:
        excluded = []
    elif isinstance(exclude, (list, tuple)):
        excluded = exclude
    else:
        excluded = [exclude]
    if 'json' not in excluded and 'json' not in self.registered_formats:
        self.discover_json()
    if 'yaml' not in excluded and 'yaml' not in self.registered_formats:
        self.discover_yaml()
def update(self, request, key):
    """Set an email address as primary address.

    Expects ``email`` and ``user`` parameters in a query-string-encoded
    request body; responds 204 on success, 400 when ``email`` is missing,
    404 when the address does not exist for that user.
    """
    # Body of the (non-standard) UPDATE verb is parsed like a query string.
    request.UPDATE = http.QueryDict(request.body)
    email_addr = request.UPDATE.get('email')
    user_id = request.UPDATE.get('user')
    if not email_addr:
        return http.HttpResponseBadRequest()
    try:
        email = EmailAddress.objects.get(address=email_addr, user_id=user_id)
    except EmailAddress.DoesNotExist:
        raise http.Http404
    # Promote the address to the user's primary email.
    email.user.email = email_addr
    email.user.save()
    return http.HttpResponse(status=204)
def get_child_edge(cls, index, left_parent, right_parent):
    """Construct a child edge from two parent edges."""
    # Determine the conditioned variables and the dependence set shared by
    # the two parent edges.
    [ed1, ed2, depend_set] = cls._identify_eds_ing(left_parent, right_parent)
    left_u, right_u = cls.get_conditional_uni(left_parent, right_parent)
    # Pair the conditional (pseudo-)observations column-wise.
    X = np.array([[x, y] for x, y in zip(left_u, right_u)])
    # Fit and select the best bivariate copula family and its parameter.
    name, theta = Bivariate.select_copula(X)
    new_edge = Edge(index, ed1, ed2, name, theta)
    new_edge.D = depend_set
    new_edge.parents = [left_parent, right_parent]
    return new_edge
def shorten(text):
    """Reduce text length for displaying/logging purposes."""
    if len(text) < MAX_DISPLAY_LEN:
        return text
    # NOTE: ``-MAX_DISPLAY_LEN // 2`` floor-divides the negated constant, so
    # the kept tail may be one character longer than the head for odd limits.
    return text[:MAX_DISPLAY_LEN // 2] + "..." + text[-MAX_DISPLAY_LEN // 2:]
def calc_mass_from_fit_and_conv_factor(A, Damping, ConvFactor):
    """Calculates mass from the A parameter from fitting, the damping from
    fitting in angular units and the Conversion factor calculated from
    comparing the ratio of the z signal and first harmonic of z.

    Parameters
    ----------
    A : float
        A factor calculated from fitting
    Damping : float
        damping in radians/second calculated from fitting
    ConvFactor : float
        conversion factor between volts and nms

    Returns
    -------
    mass : float
        mass in kgs
    """
    # Bath temperature assumed by the fit, in kelvin.
    T0 = 300
    # Equipartition-based prefactor, scaled by the calibration and damping.
    thermal_prefactor = 2 * Boltzmann * T0 / (pi * A)
    return thermal_prefactor * ConvFactor ** 2 * Damping
def local_manager_is_default(self, adm_gid, gid):
    """Check whether gid is default group for local manager group.

    Raises when ``gid`` is not in the manager group's target list at all.
    """
    settings = self.root['settings']['ugm_localmanager'].attrs
    rule = settings[adm_gid]
    if gid not in rule['target']:
        raise Exception(u"group '%s' not managed by '%s'" % (gid, adm_gid))
    return gid in rule['default']
def getAllViewsAsDict(self):
    """Return all the stats views (dict)."""
    # Map each plugin name to that plugin's views.
    views = {}
    for plugin_name in self._plugins:
        views[plugin_name] = self._plugins[plugin_name].get_views()
    return views
def sendPartialResponse(self):
    """Send a partial response without closing the connection.

    :return: <void>
    """
    # Populate the shared response mapping, then hand off to the protocol.
    response = self.requestProtocol.requestResponse
    response["code"] = self.responseCode
    response["content"] = self.responseContent
    response["errors"] = self.responseErrors
    self.requestProtocol.sendPartialRequestResponse()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.