signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def siret(self, max_sequential_digits=2):
    """Generate a SIRET number (14 digits).

    A SIRET is the concatenation of a SIREN number (9 digits), a
    sequential number (4 digits) and a control digit (1 digit).
    If ``max_sequential_digits`` is invalid, it is set to 2.

    :param max_sequential_digits: maximum number of digits for the
        sequential part (> 0 and <= 4).
    """
    if not 0 < max_sequential_digits <= 4:
        max_sequential_digits = 2
    sequential = str(self.random_number(max_sequential_digits)).zfill(4)
    return self.numerify(self.siren() + ' ' + sequential + '#')
|
def _check_all_metadata_found ( metadata , items ) :
"""Print warning if samples in CSV file are missing in folder"""
|
for name in metadata :
seen = False
for sample in items :
check_file = sample [ "files" ] [ 0 ] if sample . get ( "files" ) else sample [ "vrn_file" ]
if isinstance ( name , ( tuple , list ) ) :
if check_file . find ( name [ 0 ] ) > - 1 :
seen = True
elif check_file . find ( name ) > - 1 :
seen = True
elif "*" in name and fnmatch . fnmatch ( check_file , "*/%s" % name ) :
seen = True
if not seen :
print ( "WARNING: sample not found %s" % str ( name ) )
|
def logic_subset(self, op=None):
    """Return the set of logicnets, filtered by the logic op type(s) in ``op``.

    When ``op`` is None, the full set of logicnets associated with the
    Block is returned. This is helpful e.g. for getting all memories of
    a block.
    """
    if op is None:
        return self.logic
    return {net for net in self.logic if net.op in op}
|
def season_id(x):
    """Convert a 4-digit season start year into an API-formatted seasonID.

    :param x: four digit year (int or str) for the first half of the
        desired season, e.g. ``2017`` -> ``"22017"``.
    :returns: the seasonID string, ``"2" + str(x)``.
    :raises ValueError: if ``x`` is not a four digit year.
    """
    msg = "Enter the four digit year for the first half of the desired season"
    year = str(x)
    # The original wrapped "".join in try/except ValueError, but joining
    # strings can never raise ValueError; validate the input instead.
    if len(year) != 4 or not year.isdigit():
        raise ValueError(msg)
    return "2" + year
|
def solar_irradiation(latitude, longitude, Z, moment, surface_tilt,
                      surface_azimuth, T=None, P=None, solar_constant=1366.1,
                      atmos_refract=0.5667, albedo=0.25, linke_turbidity=None,
                      extraradiation_method='spencer',
                      airmass_model='kastenyoung1989', cache=None):
    r'''Calculates the amount of solar radiation and radiation reflected back
    by the atmosphere which hits a surface at a specified tilt, facing a
    specified azimuth. This is a wrapper around the comprehensive
    `pvlib library <https://github.com/pvlib/pvlib-python>`_-derived code
    and requires it to be available.

    Parameters
    ----------
    latitude : float
        Latitude, between -90 and 90 [degrees]
    longitude : float
        Longitude, between -180 and 180, [degrees]
    Z : float
        Elevation above sea level for the position, [m]
    moment : datetime
        Time and date for the calculation, in local UTC time (not daylight
        savings time), [-]
    surface_tilt : float
        The angle above the horizontal of the object being hit by radiation,
        [degrees]
    surface_azimuth : float
        The angle the object is facing (positive North eastwards 0 to 360
        degrees), [degrees]
    T : float, optional
        Temperature of atmosphere at ground level, [K]
    P : float, optional
        Pressure of atmosphere at ground level, [Pa]
    solar_constant : float, optional
        The amount of solar radiation which reaches earth's disk (at a
        standardized distance of 1 AU), [W/m^2]
    atmos_refract : float, optional
        Atmospheric refractivity at sunrise/sunset (0.5667 deg is an often
        used value), [degrees]
    albedo : float, optional
        The average amount of reflection of the terrain surrounding the
        object at quite a distance, [-]
    linke_turbidity : float, optional
        The amount of pollution/water in the sky versus a perfect clear sky;
        if not specified this is retrieved from a historical grid; typical
        values are 3 for cloudy and 7 for severe pollution around a city, [-]
    extraradiation_method : str, optional
        Method to calculate the effect of earth's position on the amount of
        radiation which reaches earth, [-]
    airmass_model : str, optional
        Method to calculate the amount of air the sunlight needs to travel
        through to reach the earth, [-]
    cache : dict, optional
        Dictionary checked for precomputed values to skip some calculations;
        keys `apparent_zenith`, `zenith`, `azimuth` are supported, [-]

    Returns
    -------
    poa_global : float
        The total irradiance in the plane of the surface, [W/m^2]
    poa_direct : float
        The total beam irradiance in the plane of the surface, [W/m^2]
    poa_diffuse : float
        The total diffuse irradiance in the plane of the surface, [W/m^2]
    poa_sky_diffuse : float
        The sky component of the diffuse irradiance, excluding the impact
        from the ground, [W/m^2]
    poa_ground_diffuse : float
        The ground-sky diffuse irradiance component, [W/m^2]

    Notes
    -----
    The retrieval of `linke_turbidity` requires the pytables library (and
    pandas); if it is not installed, specify a value of `linke_turbidity` to
    avoid the dependency. There is some redundancy in the results:
    poa_diffuse = poa_ground_diffuse + poa_sky_diffuse and
    poa_global = poa_direct + poa_diffuse. At night all values are zero.
    This calculation is fairly slow.

    References
    ----------
    .. [1] Will Holmgren, Calama-Consulting, Tony Lorenzo, Uwe Krien, bmu,
       DaCoEx, mayudong, et al. Pvlib/Pvlib-Python: 0.5.1. Zenodo, 2017.
       https://doi.org/10.5281/zenodo.1016425
    '''
    from fluids.optional import spa  # noqa: F401 -- vendored module, kept as in original
    # NOTE: the original import list repeated get_relative_airmass and
    # get_absolute_airmass; the duplicates are removed here.
    from fluids.optional.irradiance import (get_relative_airmass,
                                            get_absolute_airmass, ineichen,
                                            get_total_irradiance)
    moment_timetuple = moment.timetuple()
    # 'spencer' works from the day of year; other methods take the datetime.
    moment_arg_dni = (moment_timetuple.tm_yday
                      if extraradiation_method == 'spencer' else moment)
    dni_extra = _get_extra_radiation_shim(moment_arg_dni,
                                          solar_constant=solar_constant,
                                          method=extraradiation_method,
                                          epoch_year=moment.year)
    if T is None or P is None:
        # Fill in missing atmospheric conditions from a standard atmosphere.
        atmosphere = ATMOSPHERE_NRLMSISE00(Z=Z, latitude=latitude,
                                           longitude=longitude,
                                           day=moment_timetuple.tm_yday)
        if T is None:
            T = atmosphere.T
        if P is None:
            P = atmosphere.P
    if cache is not None and 'zenith' in cache:
        zenith = cache['zenith']
        apparent_zenith = cache['apparent_zenith']
        azimuth = cache['azimuth']
    else:
        apparent_zenith, zenith, _, _, azimuth, _ = solar_position(
            moment=moment, latitude=latitude, longitude=longitude, Z=Z,
            T=T, P=P, atmos_refract=atmos_refract)
    if linke_turbidity is None:
        from pvlib.clearsky import lookup_linke_turbidity
        import pandas as pd
        linke_turbidity = float(lookup_linke_turbidity(
            pd.DatetimeIndex([moment]), latitude, longitude).values)
    if airmass_model in apparent_zenith_airmass_models:
        used_zenith = apparent_zenith
    elif airmass_model in true_zenith_airmass_models:
        used_zenith = zenith
    else:
        raise Exception('Unrecognized airmass model')
    relative_airmass = get_relative_airmass(used_zenith, model=airmass_model)
    airmass_absolute = get_absolute_airmass(relative_airmass, pressure=P)
    # Clear-sky irradiance components on the horizontal plane.
    ans = ineichen(apparent_zenith=apparent_zenith,
                   airmass_absolute=airmass_absolute,
                   linke_turbidity=linke_turbidity, altitude=Z,
                   dni_extra=solar_constant, perez_enhancement=True)
    ghi = ans['ghi']
    dni = ans['dni']
    dhi = ans['dhi']
    # Transpose onto the tilted plane of the surface.
    ans = get_total_irradiance(surface_tilt=surface_tilt,
                               surface_azimuth=surface_azimuth,
                               solar_zenith=apparent_zenith,
                               solar_azimuth=azimuth,
                               dni=dni, ghi=ghi, dhi=dhi,
                               dni_extra=dni_extra,
                               airmass=airmass_absolute, albedo=albedo)
    poa_global = float(ans['poa_global'])
    poa_direct = float(ans['poa_direct'])
    poa_diffuse = float(ans['poa_diffuse'])
    poa_sky_diffuse = float(ans['poa_sky_diffuse'])
    poa_ground_diffuse = float(ans['poa_ground_diffuse'])
    return (poa_global, poa_direct, poa_diffuse, poa_sky_diffuse,
            poa_ground_diffuse)
|
def printAllColorsToConsole(cls):
    '''A simple enumeration of the colors to the console to help decide :)'''
    modifiers = ("BOLD", "DIM", "UNDER", "INV")
    # Iterating __dict__ directly; skip specials such as __class__ or __module__.
    for name in cls.__dict__:
        if name.startswith("__"):
            continue
        color_fmt = cls.__dict__[name]
        if isinstance(color_fmt, six.string_types) and color_fmt not in modifiers:
            print("\033[{fmt}AnsiColors.{name}\033[0m".format(fmt=color_fmt,
                                                              name=name))
|
def calc_mdl(yx_dist, y_dist):
    """Calculate MDL with the given label distributions.

    yx_dist: list of dictionaries -- for every split, a dictionary with
        label distributions
    y_dist: dictionary -- all label distributions

    Reference:
    Igor Kononenko. On biases in estimating multi-valued attributes.
    In IJCAI, volume 95, pages 1034-1040, 1995.
    """
    n_labels = len(y_dist.keys())
    total_count = sum(y_dist.values())
    prior = multinomLog2(y_dist.values())
    prior += multinomLog2([n_labels - 1, total_count])
    post = 0
    for split_dist in yx_dist:
        post += multinomLog2([split_dist.get(label, 0)
                              for label in y_dist.keys()])
        post += multinomLog2([n_labels - 1, sum(split_dist.values())])
    return (prior - post) / float(total_count)
|
def use_plenary_repository_view(self):
    """Pass through to provider AssetRepositorySession.use_plenary_repository_view"""
    self._repository_view = PLENARY
    for provider_session in self._get_provider_sessions():
        # Not every provider session supports this view; skip those that don't.
        try:
            provider_session.use_plenary_repository_view()
        except AttributeError:
            pass
|
def update_datetime(value, range=None):
    """Update (drift) a datetime value within a specified range.

    :param value: a datetime value to drift.
    :param range: (optional) drift range in days. Default: 10 days.
        NOTE(review): the original docstring said "milliseconds" but the
        value is used as days via ``timedelta(days)`` -- documented as days
        here; confirm against callers.
    :return: an updated datetime value.
    """
    # ``range`` shadows the builtin, but the parameter name is part of the
    # public interface (callers may pass it by keyword), so it is kept.
    if range is None:
        range = 10
    if range < 0:
        return value
    days = RandomFloat.next_float(-range, range)
    return value + datetime.timedelta(days)
|
def getInstance(cls, *args):
    '''Returns a singleton instance of the class'''
    # Lazily create the singleton on first access.
    instance = cls.__singleton
    if not instance:
        instance = cls.__singleton = DriverManager(*args)
    return instance
|
def convert_widgets(self):
    """During form initialization, some widgets have to be replaced by a
    counterpart suitable to be rendered the AngularJS way."""
    warnings.warn("Will be removed after dropping support for Django-1.10",
                  PendingDeprecationWarning)
    widgets_module = getattr(self, 'widgets_module', 'djng.widgets')
    for field in self.base_fields.values():
        converter = getattr(field, 'get_converted_widget', None)
        if converter is None:
            continue
        new_widget = converter(widgets_module)
        if new_widget:
            field.widget = new_widget
|
def negative_binomial(k=1, p=1, shape=_Null, dtype=_Null, ctx=None, out=None,
                      **kwargs):
    """Draw random samples from a negative binomial distribution.

    Samples are distributed according to a negative binomial distribution
    parametrized by *k* (limit of unsuccessful experiments) and *p* (failure
    probability in each experiment). Samples will always be returned as a
    floating point data type.

    Parameters
    ----------
    k : float or NDArray, optional
        Limit of unsuccessful experiments, > 0.
    p : float or NDArray, optional
        Failure probability in each experiment, >= 0 and <= 1.
    shape : int or tuple of ints, optional
        The number of samples to draw for each `[k, p)` pair. With scalar
        `k` and `p` and `shape=(m, n)` the output shape is `(m, n)`; with
        NDArray inputs of shape `(x, y)` it is `(x, y, m, n)`.
    dtype : {'float16', 'float32', 'float64'}, optional
        Data type of output samples. Default is 'float32'.
    ctx : Context, optional
        Device context of output. Default is current context. Overridden by
        `k.context` when `k` is an NDArray.
    out : NDArray, optional
        Store output to an existing NDArray.

    Returns
    -------
    NDArray
        Samples drawn for each `[k, p)` pair.
    """
    distribution_params = [k, p]
    return _random_helper(_internal._random_negative_binomial,
                          _internal._sample_negative_binomial,
                          distribution_params, shape, dtype, ctx, out, kwargs)
|
def get_ZXY_data(Data, zf, xf, yf, FractionOfSampleFreq=1,
                 zwidth=10000, xwidth=5000, ywidth=5000,
                 filterImplementation="filtfilt",
                 timeStart=None, timeEnd=None,
                 NPerSegmentPSD=1000000, MakeFig=True, show_fig=True):
    """Extract the individual z, x and y signals (in volts) from a Data
    object by creating IIR band-pass filters around the given peak
    frequencies and filtering the data.

    Parameters
    ----------
    Data : DataObject
        DataObject containing the data from which to extract the z, x and
        y signals.
    zf, xf, yf : float
        The frequency of the z, x and y peaks in the PSD.
    FractionOfSampleFreq : integer, optional
        The fraction of the sample frequency to sub-sample the data by.
        A higher value lowers the output sample rate but raises the chance
        that the generated filter has a usable frequency response.
    zwidth, xwidth, ywidth : float, optional
        The widths of the pass-bands of the IIR filters for Z, X and Y.
    filterImplementation : string, optional
        "filtfilt" or "lfilter" - use scipy.signal.filtfilt or lfilter.
    timeStart, timeEnd : float, optional
        Starting/ending time for filtering; default to Data's full range.
    NPerSegmentPSD : int, optional
        nperseg value for the Welch PSDs plotted when MakeFig is True.
    MakeFig : bool, optional
        If True, plot the unfiltered and filtered PSDs for z, x and y.
    show_fig : bool, optional
        If True, call plt.show() (note: done even when MakeFig is False,
        matching the original behavior).

    Returns
    -------
    zdata, xdata, ydata : ndarray
        The z, x and y signals in volts over time.
    timedata : ndarray
        The time data matching the signals.
    fig, ax : matplotlib figure/axes or None
        The created plot, or (None, None) when MakeFig is False.

    Raises
    ------
    ValueError
        If filterImplementation is unrecognized, or a produced filter is
        unstable (output contains NaNs).
    """
    if timeStart is None:
        timeStart = Data.timeStart
    if timeEnd is None:
        timeEnd = Data.timeEnd
    time = Data.time.get_array()
    StartIndex = _np.where(time == take_closest(time, timeStart))[0][0]
    EndIndex = _np.where(time == take_closest(time, timeEnd))[0][0]
    SAMPLEFREQ = Data.SampleFreq / FractionOfSampleFreq
    if filterImplementation == "filtfilt":
        ApplyFilter = scipy.signal.filtfilt
    elif filterImplementation == "lfilter":
        ApplyFilter = scipy.signal.lfilter
    else:
        raise ValueError(
            "filterImplementation must be one of [filtfilt, lfilter] you entered: {}".format(filterImplementation))
    input_signal = Data.voltage[StartIndex:EndIndex][0::FractionOfSampleFreq]

    def _filter_axis(label, freq, width):
        # Band-pass a single axis; fail loudly if the IIR filter is
        # unstable (NaNs in the output). Factored out of the original
        # triplicated Z/X/Y code.
        b, a = make_butterworth_bandpass_b_a(freq, width, SAMPLEFREQ)
        print("filtering {}".format(label))
        filtered = ApplyFilter(b, a, input_signal)
        if _np.isnan(filtered).any():
            raise ValueError(
                "Value Error: FractionOfSampleFreq must be higher, a sufficiently small sample frequency should be used to produce a working IIR filter.")
        return filtered

    zdata = _filter_axis("Z", zf, zwidth)
    xdata = _filter_axis("X", xf, xwidth)
    ydata = _filter_axis("Y", yf, ywidth)
    if MakeFig:
        f, PSD = scipy.signal.welch(input_signal, SAMPLEFREQ,
                                    nperseg=NPerSegmentPSD)
        f_z, PSD_z = scipy.signal.welch(zdata, SAMPLEFREQ,
                                        nperseg=NPerSegmentPSD)
        f_y, PSD_y = scipy.signal.welch(ydata, SAMPLEFREQ,
                                        nperseg=NPerSegmentPSD)
        f_x, PSD_x = scipy.signal.welch(xdata, SAMPLEFREQ,
                                        nperseg=NPerSegmentPSD)
        fig, ax = _plt.subplots(figsize=properties["default_fig_size"])
        ax.plot(f, PSD)
        ax.plot(f_z, PSD_z, label="z")
        ax.plot(f_x, PSD_x, label="x")
        ax.plot(f_y, PSD_y, label="y")
        ax.legend(loc="best")
        ax.semilogy()
        ax.set_xlim([zf - zwidth, yf + ywidth])
    else:
        fig = None
        ax = None
    if show_fig:
        _plt.show()
    timedata = time[StartIndex:EndIndex][0::FractionOfSampleFreq]
    return zdata, xdata, ydata, timedata, fig, ax
|
def draw(self, parent, box):
    '''redraw the text'''
    import wx
    # Only create the control once; later calls are no-ops.
    if self.textctrl is not None:
        return
    self.textctrl = wx.TextCtrl(parent, style=wx.TE_MULTILINE | wx.TE_READONLY)
    self.textctrl.WriteText(self.text)
    self._resize()
    box.Add(self.textctrl, flag=wx.LEFT, border=0)
    box.Layout()
|
def parse_requirements(strs):
    """Yield ``Requirement`` objects for each specification in ``strs``.

    ``strs`` must be a string, or a (possibly-nested) iterable thereof.
    """
    # Create a steppable iterator, so we can consume extra lines for
    # \-continuations.
    line_iter = iter(yield_lines(strs))
    for spec in line_iter:
        # Drop comments -- a hash without a space may be in a URL.
        comment_pos = spec.find(' #')
        if comment_pos != -1:
            spec = spec[:comment_pos]
        # If there is a line continuation, drop it, and append the next line.
        if spec.endswith('\\'):
            spec = spec[:-2].strip()
            try:
                spec += next(line_iter)
            except StopIteration:
                return
        yield Requirement(spec)
|
def draw(self):
    """Return an ASCII representation of the network."""
    width = 7 * self.depth
    wires = []
    gaps = []
    # One horizontal line per wire, labelled "<n> o"; one blank gap line
    # between each adjacent pair of wires.
    for wire in range(self.dimension):
        row = ["-"] * width
        row[0] = str(wire)
        row[1] = " o"
        wires.append(row)
        if wire > 0:
            gaps.append([" "] * width)
    for index, level in enumerate(self):
        col = (index + 1) * 6
        for wire1, wire2 in level:
            # Mark comparator endpoints and draw the vertical connector.
            wires[wire1][col] = "x"
            wires[wire2][col] = "x"
            for i in range(wire1, wire2):
                gaps[i][col + 1] = "|"
            for i in range(wire1 + 1, wire2):
                wires[i][col] = "|"
    lines = ["".join(wires[0])]
    for row, gap in zip(wires[1:], gaps):
        lines.append("".join(gap))
        lines.append("".join(row))
    return "\n".join(lines)
|
def add_agent(self, agent):
    """Add an INDRA Agent and its conditions to the Nugget."""
    agent_id = self.add_node(agent.name)
    self.add_typing(agent_id, 'agent')
    # Handle bound conditions. Here we make the assumption that the
    # binding site is simply named after the binding partner.
    for bc in agent.bound_conditions:
        test_type = 'is_bnd' if bc.is_bound else 'is_free'
        bound_name = bc.agent.name
        agent_bs = get_binding_site_name(bc.agent)
        test_name = '%s_bound_to_%s_test' % (agent_id, bound_name)
        agent_bs_id = self.add_node(agent_bs)
        test_id = self.add_node(test_name)
        self.add_edge(agent_bs_id, agent_id)
        self.add_edge(agent_bs_id, test_id)
        self.add_typing(agent_bs_id, 'locus')
        self.add_typing(test_id, test_type)
    # Handle modification conditions as state nodes.
    for mod in agent.mods:
        site_label = abbrevs[mod.mod_type]
        if mod.residue is not None:
            site_label = mod.residue
        pos_label = mod.position if mod.position is not None else ''
        mod_site = '%s%s' % (site_label, pos_label)
        site_states = states[mod.mod_type]
        val = site_states[1] if mod.is_modified else site_states[0]
        mod_site_id = self.add_node(mod_site, {'val': val})
        self.add_edge(mod_site_id, agent_id)
        self.add_typing(mod_site_id, 'state')
    return agent_id
|
def generate_const(self):
    """Emit code checking that the value equals the const definition.

    .. code-block:: python

        'const': 42,

    Only valid value is 42 in this example.
    """
    expected = self._definition['const']
    # Quote string constants so they are embedded as string literals.
    if isinstance(expected, str):
        expected = '"{}"'.format(expected)
    with self.l('if {variable} != {}:', expected):
        self.l('raise JsonSchemaException("{name} must be same as const definition")')
|
def xpathNextNamespace(self, cur):
    """Traversal function for the "namespace" direction. The namespace
    axis contains the namespace nodes of the context node; the order of
    nodes on this axis is implementation-defined; the axis will be empty
    unless the context node is an element. We keep the XML namespace
    node at the end of the list."""
    handle = None if cur is None else cur._o
    ret = libxml2mod.xmlXPathNextNamespace(self._o, handle)
    if ret is None:
        raise xpathError('xmlXPathNextNamespace() failed')
    return xmlNode(_obj=ret)
|
def create_dset_to3d(prefix, file_list, file_order='zt', num_slices=None,
                     num_reps=None, TR=None, slice_order='alt+z',
                     only_dicoms=True, sort_filenames=False):
    '''Manually create a dataset by specifying everything (not recommended,
    but necessary when autocreation fails).

    If ``num_slices`` or ``num_reps`` is omitted, it will be inferred from
    the number of images. If both are omitted, the dataset is assumed to
    not be time-dependent.

    :only_dicoms: filter the given list down to readable DICOM images
    :sort_filenames: sort the given files by the right-most number in each
        filename
    :returns: ``False`` on failure; ``None`` on success
    '''
    tags = {'num_rows': (0x0028, 0x0010),
            'num_reps': (0x0020, 0x0105),
            'TR': (0x0018, 0x0080)}
    with nl.notify('Trying to create dataset %s' % prefix):
        if os.path.exists(prefix):
            nl.notify('Error: file "%s" already exists!' % prefix,
                      level=nl.level.error)
            return False
        tagvals = {}
        for f in file_list:
            # Best-effort: files that cannot be read as DICOM get no tags.
            try:
                tagvals[f] = info_for_tags(f, tags.values())
            except Exception:
                pass
        if only_dicoms:
            # Only include DICOMs that actually have image information.
            file_list = [f for f in file_list
                         if f in tagvals and len(tagvals[f][tags['num_rows']]) > 0]
        if sort_filenames:
            def file_num(fname):
                # Key on the right-most number in the filename; fall back
                # to the name itself for files with no number.
                try:
                    nums = [x.strip('.') for x in re.findall(r'[\d.]+', fname)
                            if x.strip('.') != '']
                    return float(nums[-1])
                except Exception:
                    return fname
            file_list = sorted(file_list, key=file_num)
        if len(file_list) == 0:
            nl.notify('Error: Couldn\'t find any valid DICOM images',
                      level=nl.level.error)
            return False
        cmd = ['to3d', '-skip_outliers', '-quit_on_err', '-prefix', prefix]
        if num_slices is not None or num_reps is not None:
            # Time-based dataset.
            if num_slices is None:
                if len(file_list) % num_reps != 0:
                    nl.notify('Error: trying to guess # of slices, but %d (number for files) doesn\'t divide evenly into %d (number of reps)' % (len(file_list), num_reps), level=nl.level.error)
                    return False
                # Floor division keeps the count an int under Python 3;
                # plain "/" would put e.g. "10.0" into the command line.
                num_slices = len(file_list) // num_reps
            if num_reps is None:
                if len(file_list) % num_slices == 0:
                    num_reps = len(file_list) // num_slices
                elif len(file_list) == 1 and tags['num_reps'] in tagvals[file_list[0]]:
                    num_reps = tagvals[file_list[0]][tags['num_reps']]
                else:
                    nl.notify('Error: trying to guess # of reps, but %d (number for files) doesn\'t divide evenly into %d (number of slices)' % (len(file_list), num_slices), level=nl.level.error)
                    return False
            if TR is None:
                TR = tagvals[file_list[0]][tags['TR']]
            cmd += ['-time:%s' % file_order]
            if file_order == 'zt':
                cmd += [num_slices, num_reps]
            else:
                cmd += [num_reps, num_slices]
            cmd += [TR, slice_order]
        cmd += ['-@']
        cmd = [str(x) for x in cmd]
        out = None
        try:
            p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            # NOTE(review): communicate() is given a str; under Python 3 the
            # pipes would need text mode (universal_newlines=True) -- confirm
            # the target interpreter before changing.
            out = p.communicate('\n'.join(file_list))
            if p.returncode != 0:
                raise Exception
        except Exception:
            with nl.notify('Error: to3d returned error', level=nl.level.error):
                if out:
                    nl.notify('stdout:\n' + out[0] + '\nstderr:\n' + out[1],
                              level=nl.level.error)
                return False
|
def callback(self, request, **kwargs):
    """Called from the Service when the user accepts to activate it.

    :param request: request object
    :return: callback url
    :rtype: string, path to the template
    """
    token = Pocket.get_access_token(consumer_key=self.consumer_key,
                                    code=request.session['request_token'])
    # Any incoming kwargs are intentionally replaced by the access token.
    return super(ServicePocket, self).callback(request, access_token=token)
|
def create_upload_url(success_path, max_bytes_per_blob=None,
                      max_bytes_total=None, **options):
    """Create upload URL for POST form.

    Args:
        success_path: Path within application to call when POST is
            successful and upload is complete.
        max_bytes_per_blob: The maximum size in bytes that any one blob
            in the upload can be, or None for no maximum size.
        max_bytes_total: The maximum size in bytes that the aggregate
            sizes of all of the blobs in the upload can be, or None for
            no maximum size.
        **options: Options for create_rpc().

    Returns:
        The upload URL.

    Raises:
        TypeError: If max_bytes_per_blob or max_bytes_total are not
            integral types.
        ValueError: If max_bytes_per_blob or max_bytes_total are not
            positive values.
    """
    future = create_upload_url_async(success_path,
                                     max_bytes_per_blob=max_bytes_per_blob,
                                     max_bytes_total=max_bytes_total,
                                     **options)
    return future.get_result()
|
def run_forever(lcdproc='', mpd='', lcdproc_screen=DEFAULT_LCD_SCREEN_NAME,
                lcdproc_charset=DEFAULT_LCDPROC_CHARSET, lcdd_debug=False,
                pattern='', patterns=None, refresh=DEFAULT_REFRESH,
                backlight_on=DEFAULT_BACKLIGHT_ON,
                priority_playing=DEFAULT_PRIORITY,
                priority_not_playing=DEFAULT_PRIORITY,
                retry_attempts=DEFAULT_RETRY_ATTEMPTS,
                retry_wait=DEFAULT_RETRY_WAIT,
                retry_backoff=DEFAULT_RETRY_BACKOFF):
    """Run the server.

    Args:
        lcdproc (str): the target connection (host:port) for lcdproc
        mpd (str): the target connection ([pwd@]host:port) for mpd
        lcdproc_screen (str): the name of the screen to use for lcdproc
        lcdproc_charset (str): the charset to use with lcdproc
        lcdd_debug (bool): whether to enable full LCDd debug
        pattern (str): the pattern to use; overrides ``patterns``
        patterns (str list): the patterns to use; falls back to
            DEFAULT_PATTERNS (default changed from the mutable ``[]``
            to ``None``; behavior is unchanged)
        refresh (float): how often to refresh the display
        backlight_on (str): the rules for activating backlight
        priority_playing (str): lcdproc screen priority while playing
        priority_not_playing (str): lcdproc screen priority otherwise
        retry_attempts (int): number of connection attempts
        retry_wait (int): time between connection attempts
        retry_backoff (int): increase to between-attempts delay
    """
    # Compute host/ports
    lcd_conn = _make_hostport(lcdproc, 'localhost', 13666)
    mpd_conn = _make_hostport(mpd, 'localhost', 6600)
    # Prepare auto-retry
    retry_config = utils.AutoRetryConfig(retry_attempts=retry_attempts,
                                         retry_backoff=retry_backoff,
                                         retry_wait=retry_wait)
    # Setup MPD client
    mpd_client = mpdwrapper.MPDClient(host=mpd_conn.hostname,
                                      port=mpd_conn.port,
                                      password=mpd_conn.username,
                                      retry_config=retry_config)
    # Setup LCDd client
    lcd = _make_lcdproc(lcd_conn.hostname, lcd_conn.port,
                        lcdd_debug=lcdd_debug, charset=lcdproc_charset,
                        retry_config=retry_config)
    # Setup connector
    runner = lcdrunner.MpdRunner(mpd_client, lcd,
                                 lcdproc_screen=lcdproc_screen,
                                 refresh_rate=refresh,
                                 retry_config=retry_config,
                                 backlight_on=backlight_on,
                                 priority_playing=priority_playing,
                                 priority_not_playing=priority_not_playing)
    # Fill pattern: explicit single pattern wins, then the explicit list,
    # then the defaults.
    if pattern:
        patterns = [pattern]
    elif not patterns:
        patterns = DEFAULT_PATTERNS
    pattern_list = _make_patterns(patterns)
    mpd_hook_registry = mpdhooks.HookRegistry()
    runner.setup_pattern(pattern_list, hook_registry=mpd_hook_registry)
    # Launch
    mpd_client.connect()
    runner.run()
    # Exit
    logging.shutdown()
|
def pass_allowedremoterelieve_v1(self):
    """Update the outlet link sequence |dam_outlets.R|."""
    fluxes = self.sequences.fluxes.fastaccess
    senders = self.sequences.senders.fastaccess
    senders.r[0] += fluxes.allowedremoterelieve
|
def ListProfilers(self):
    """Lists information about the available profilers."""
    view = views.ViewsFactory.GetTableView(
        self._views_format_type,
        column_names=['Name', 'Description'],
        title='Profilers')
    # Sort by profiler name for a stable, readable listing.
    information = profiling.ProfilingArgumentsHelper.PROFILERS_INFORMATION
    for name, description in sorted(information.items()):
        view.AddRow([name, description])
    view.Write(self._output_writer)
|
def pull_requests(self):
    '''Extract pull request numbers referenced in the description field.

    Looks for any of the following pull request formats in the description:
    pr12345, pr 2345, PR2345, PR 2345, and "pull request 2345"
    (case-insensitive).

    :returns: list of pull request numbers (digit strings) in order of
        first appearance, with duplicates removed.
    '''
    # Raw strings avoid invalid-escape warnings for the \s sequences.
    matches = re.findall(r"[pP][rR]\s?[0-9]+", self.description)
    matches += re.findall(
        re.compile(r"pull\s?request\s?[0-9]+", re.IGNORECASE),
        self.description)
    # Strip everything but the digits.
    numbers = [re.sub(r'[^0-9]', '', match) for match in matches]
    # BUG FIX: the original comment promised de-duplication but never
    # performed it; dict.fromkeys removes duplicates preserving order.
    return list(dict.fromkeys(numbers))
|
def first_interval_starting(self, start: datetime.datetime) -> Optional[Interval]:
    """Returns our first interval that starts with the ``start`` parameter, or
    ``None`` if no interval starts at that time."""
    return next(
        (interval for interval in self.intervals if interval.start == start),
        None)
|
def dpar(self, cl=1):
    """Return dpar-style executable assignment for this parameter.

    Default is to write the CL version of the code; if the ``cl``
    parameter is false, writes Python executable code instead.  Note
    that dpar doesn't even work for arrays in the CL, so we just use
    Python syntax here either way.
    """
    rendered = [self.toString(value, 1) for value in self.value]
    # Empty renderings stand for missing values.
    rendered = [text if text else "None" for text in rendered]
    return "%s = [%s]" % (self.name, ', '.join(rendered))
|
def is_starred(self):
    """Check to see if this gist is starred by the authenticated user.

    :returns: bool -- True if it is starred, False otherwise
    """
    star_url = self._build_url('star', base_url=self._api)
    response = self._get(star_url)
    # 204 means starred, 404 means not starred.
    return self._boolean(response, 204, 404)
|
def follower_num(self):
    """Get the number of followers of this question.

    :return: number of followers (0 when no one follows the question)
    :rtype: int
    """
    block = self.soup.find('div', class_='zg-gray-normal')
    # When nobody follows the question the block (or its <strong> tag)
    # is absent, so report zero instead of crashing.
    if block is None or block.strong is None:
        return 0
    return int(block.strong.text)
|
def login_open_sheet(oauth_key_file, spreadsheet):
    """Connect to Google Docs spreadsheet and return the first worksheet.

    Exits the process (status 1) when authentication or the spreadsheet
    lookup fails.
    """
    try:
        credentials = ServiceAccountCredentials.from_json_keyfile_name(
            oauth_key_file, ['https://spreadsheets.google.com/feeds'])
        client = gspread.authorize(credentials)
        return client.open(spreadsheet).sheet1
    except Exception as error:
        # Deliberately broad: any failure here is fatal for the script.
        print('Unable to login and get spreadsheet. Check OAuth credentials, spreadsheet name, and make sure spreadsheet is shared to the client_email address in the OAuth .json file!')
        print('Google sheet login failed with error:', error)
        sys.exit(1)
|
def history(self, dates=None, linreg_since=None, lin_reg_days=20):
    '''Works only on a Result that has _start and _end columns.

    :param dates: list of dates to query (defaults to the full range)
    :param linreg_since: estimate future values using linear regression.
    :param lin_reg_days: number of past days to use as prediction basis
    '''
    if not dates:
        dates = self.get_dates_range()
    counts = [self.on_date(day, only_count=True) for day in dates]
    series = Series(counts, index=dates)
    if linreg_since is not None:
        series = self._linreg_future(series, linreg_since, lin_reg_days)
    return series.sort_index()
|
def cartesian_to_axial(x, y, size, orientation, aspect_scale=1):
    '''Map Cartesian *(x, y)* points to axial *(q, r)* coordinates of the
    enclosing tiles.

    Adapted from:
    https://www.redblobgames.com/grids/hexagons/#pixel-to-hex

    Args:
        x (array[float]): NumPy array of x-coordinates to convert
        y (array[float]): NumPy array of y-coordinates to convert
        size (float): Size of the hexagonal tiling, i.e. the distance from
            a hexagon center to a top corner for "pointytop" orientation,
            or to a side corner for "flattop" orientation.
        orientation (str): "pointytop" or "flattop"
        aspect_scale (float, optional): Scale applied in the "cross"
            dimension (horizontal for "pointytop", vertical for "flattop").
            Useful for plots with ``aspect_scale != 1``.

    Returns:
        (array[int], array[int])
    '''
    # Conversion matrix entries (m0, m1, m2, m3) per orientation.
    flat = [2.0 / 3.0, 0.0, -1.0 / 3.0, np.sqrt(3.0) / 3.0]
    pointy = [np.sqrt(3.0) / 3.0, -1.0 / 3.0, 0.0, 2.0 / 3.0]
    m0, m1, m2, m3 = flat if orientation == 'flattop' else pointy
    # Normalize by tile size, applying the aspect scale in the cross
    # dimension only; y is negated (screen coords -> hex coords).
    sx = x / size * (aspect_scale if orientation == "pointytop" else 1)
    sy = -y / size / (aspect_scale if orientation == "flattop" else 1)
    return _round_hex(m0 * sx + m1 * sy, m2 * sx + m3 * sy)
|
def run(self):
    """Run the multiopt parser.

    Parses the global options and a sub-command name from ``self.argv``,
    handles the special ``help`` command, then dispatches to the chosen
    sub-command's ``func`` with its own parsed options and arguments.

    Returns a shell-style status code: 2 on usage errors, 1 otherwise
    (including after showing help or reporting an execution error).

    NOTE(review): this method uses Python 2 syntax (``except Exception, e``
    and ``print`` statements) -- it targets Python 2 only.
    """
    # Top-level parser over the global options and the known command set.
    self.parser = MultioptOptionParser(usage="%prog <command> [options] [args]", prog=self.clsname, version=self.version, option_list=self.global_options, description=self.desc_short, commands=self.command_set, epilog=self.footer)
    try:
        self.options, self.args = self.parser.parse_args(self.argv)
    except Exception, e:
        # Best-effort: report the parse failure and keep going.
        print str(e)
        pass
    # A command name is required.
    if len(self.args) < 1:
        self.parser.print_lax_help()
        return 2
    self.command = self.args.pop(0)
    showHelp = False
    # "help <command>" shows the long help for <command>.
    if self.command == 'help':
        if len(self.args) < 1:
            self.parser.print_lax_help()
            return 2
        else:
            self.command = self.args.pop()
            showHelp = True
    if self.command not in self.valid_commands:
        self.parser.print_cmd_error(self.command)
        return 2
    self.command_set[self.command].set_cmdname(self.command)
    # Each command supplies its own option parser for its arguments.
    subcmd_parser = self.command_set[self.command].get_parser(self.clsname, self.version, self.global_options)
    subcmd_options, subcmd_args = subcmd_parser.parse_args(self.args)
    if showHelp:
        subcmd_parser.print_help_long()
        return 1
    try:
        self.command_set[self.command].func(subcmd_options, *subcmd_args)
    except (CommandError, TypeError), e:
        # self.parser.print_exec_error(self.command, str(e))
        subcmd_parser.print_exec_error(self.command, str(e))
        print
        # @TODO show command help
        # self.parser.print_lax_help()
        return 2
    return 1
|
def set_password(sender, **kwargs):
    """Encrypts the password of the user before it is stored.

    Only acts when the sender's model class is ``User`` and the stored
    password is not already a pbkdf2 hash.
    """
    if sender.model_class.__name__ != 'User':
        return
    user = kwargs['object']
    # Skip values that are already hashed to avoid double-hashing.
    if not user.password.startswith('$pbkdf2'):
        user.set_password(user.password)
        user.save()
|
def transfer_image(self, image_id_or_slug, region_id):
    """This method allows you to transfer an image to a specified region.

    Required parameters:
        image_id_or_slug: id (or slug) of the image you would like to
            transfer.
        region_id: numeric id of the region to which you would like to
            transfer.

    :returns: the event id of the transfer on success
    :raises DOPException: on missing arguments or a non-OK API status
    """
    if not image_id_or_slug:
        raise DOPException('image_id_or_slug is required to transfer an image!')
    if not region_id:
        raise DOPException('region_id is required to transfer an image!')
    # `response` instead of `json` to avoid shadowing the json module name.
    response = self.request(
        '/images/%s/transfer' % image_id_or_slug,
        method='GET',
        params={'region_id': region_id})
    status = response.get('status')
    if status != 'OK':
        raise DOPException('[%s]: %s' % (status, response.get('message')))
    return response.get('event_id')
|
def _email(name, *, allow_unverified=False):
    """Decorator factory that turns a function into an email-sending function.

    ``name`` identifies the email (used to locate its templates on the
    file system).  ``allow_unverified`` flags whether the email may be
    sent to an unverified address; generally undesirable, but some
    important emails require it.

    The decorated function must accept a Pyramid request and either a
    single User or a list of Users (the recipients); extra keyword
    arguments pass through.  It must return a mapping of context
    variables used to render the email templates; that mapping is also
    returned to the caller.  A recipient may alternatively be a
    ``(User, Email)`` tuple when the email should go to an address other
    than the user's primary one.
    """
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(request, user_or_users, **kwargs):
            # Normalize the recipients to an iterable.
            if not isinstance(user_or_users, (list, set)):
                recipients = [user_or_users]
            else:
                recipients = user_or_users
            context = fn(request, user_or_users, **kwargs)
            msg = EmailMessage.from_template(name, context, request=request)
            for recipient in recipients:
                # A (User, Email) tuple overrides the primary address.
                if isinstance(recipient, tuple):
                    user, email = recipient
                else:
                    user, email = recipient, None
                _send_email_to_user(
                    request, user, msg,
                    email=email, allow_unverified=allow_unverified)
            return context
        return wrapper
    return decorator
|
def filepath_to_unicode_strings(self, filepath):
    """Read text out of an input file.

    The default just reads the text, converts to unicode and yields one
    unicode string.  Subclasses can override this function in order to
    preprocess, and can yield any number of strings.

    Args:
        filepath: a string

    Yields:
        unicode strings.
    """
    contents = tf.gfile.Open(filepath).read()
    yield text_encoder.to_unicode_ignore_errors(contents)
|
def downfile(self, remotefile, localpath=''):
    '''Usage: downfile <remotefile> [localpath] - download a remote file.

    remotefile - remote file at Baidu Yun (after app root directory)
    localpath - local path:
        - ends with '/' or '\\': specifies the local directory
        - an existing directory: used as the local directory
        - not specified: the current directory '.' is used
        - otherwise: it specifies the local file name

    To stream a file using downfile, use the 'mkfifo' trick with
    omxplayer etc.:
        mkfifo /tmp/omx
        bypy.py downfile <remotepath> /tmp/omx &
        omxplayer /tmp/omx
    '''
    if not localpath:
        # No path given: download into the current directory.
        target = os.path.basename(remotefile)
    elif localpath[-1] in ('\\', '/') or os.path.isdir(localpath):
        # A directory: keep the remote base name inside it.
        target = joinpath(localpath, os.path.basename(remotefile))
    else:
        target = localpath
    return self.__downfile(get_pcs_path(remotefile), target)
|
async def stepper_config(self, command):
    """This method configures 4 pins for stepper motor operation.

    This is a FirmataPlus feature.

    :param command: {"method": "stepper_config", "params": [STEPS_PER_REVOLUTION, [PIN1, PIN2, PIN3, PIN4]]}
    :returns: No message returned.
    """
    steps_per_revolution = int(command[0])
    raw_pins = command[1]
    # Exactly four pins are expected; coerce each to int.
    pin_numbers = [int(raw_pins[i]) for i in range(4)]
    await self.core.stepper_config(steps_per_revolution, pin_numbers)
|
def calc_area_under_PSD(self, lowerFreq, upperFreq):
    """Sums the area under the PSD from lowerFreq to upperFreq.

    Parameters
    ----------
    lowerFreq : float
        The lower limit of frequency to sum from
    upperFreq : float
        The upper limit of frequency to sum to

    Returns
    -------
    AreaUnderPSD : float
        The area under the PSD from lowerFreq to upperFreq
    """
    # Snap both limits to the nearest sampled frequencies, then locate
    # their indices in the frequency array.
    start_freq = take_closest(self.freqs, lowerFreq)
    end_freq = take_closest(self.freqs, upperFreq)
    start_index = int(_np.where(self.freqs == start_freq)[0][0])
    end_index = int(_np.where(self.freqs == end_freq)[0][0])
    # NOTE: the slice excludes the end index, matching the original.
    return sum(self.PSD[start_index:end_index])
|
def update(self, unique_name=values.unset, callback_method=values.unset, callback_url=values.unset, friendly_name=values.unset, rate_plan=values.unset, status=values.unset, commands_callback_method=values.unset, commands_callback_url=values.unset, sms_fallback_method=values.unset, sms_fallback_url=values.unset, sms_method=values.unset, sms_url=values.unset, voice_fallback_method=values.unset, voice_fallback_url=values.unset, voice_method=values.unset, voice_url=values.unset):
    """Update the SimInstance.

    All parameters default to ``values.unset`` and are forwarded
    unchanged to the underlying context proxy.

    :param unicode unique_name: The unique_name
    :param unicode callback_method: The callback_method
    :param unicode callback_url: The callback_url
    :param unicode friendly_name: The friendly_name
    :param unicode rate_plan: The rate_plan
    :param unicode status: The status
    :param unicode commands_callback_method: The commands_callback_method
    :param unicode commands_callback_url: The commands_callback_url
    :param unicode sms_fallback_method: The sms_fallback_method
    :param unicode sms_fallback_url: The sms_fallback_url
    :param unicode sms_method: The sms_method
    :param unicode sms_url: The sms_url
    :param unicode voice_fallback_method: The voice_fallback_method
    :param unicode voice_fallback_url: The voice_fallback_url
    :param unicode voice_method: The voice_method
    :param unicode voice_url: The voice_url

    :returns: Updated SimInstance
    :rtype: twilio.rest.preview.wireless.sim.SimInstance
    """
    options = {
        'unique_name': unique_name,
        'callback_method': callback_method,
        'callback_url': callback_url,
        'friendly_name': friendly_name,
        'rate_plan': rate_plan,
        'status': status,
        'commands_callback_method': commands_callback_method,
        'commands_callback_url': commands_callback_url,
        'sms_fallback_method': sms_fallback_method,
        'sms_fallback_url': sms_fallback_url,
        'sms_method': sms_method,
        'sms_url': sms_url,
        'voice_fallback_method': voice_fallback_method,
        'voice_fallback_url': voice_fallback_url,
        'voice_method': voice_method,
        'voice_url': voice_url,
    }
    return self._proxy.update(**options)
|
def _post_resource ( self , body ) :
"""Create new resources and associated attributes .
Example :
acs . post _ resource ( [
" resourceIdentifier " : " masaya " ,
" parents " : [ ] ,
" attributes " : [
" issuer " : " default " ,
" name " : " country " ,
" value " : " Nicaragua "
The issuer is effectively a namespace , and in policy evaluations you
identify an attribute by a specific namespace . Many examples provide
a URL but it could be any arbitrary string .
The body is a list , so many resources can be added at the same time ."""
|
assert isinstance ( body , ( list ) ) , "POST for requires body to be a list"
uri = self . _get_resource_uri ( )
return self . service . _post ( uri , body )
|
def add_put(self, *args, **kwargs):
    """Shortcut for add_route with method PUT.

    All positional and keyword arguments are forwarded unchanged to
    ``add_route`` after the HTTP method.
    """
    return self.add_route(hdrs.METH_PUT, *args, **kwargs)
|
def removeFriend(self, user):
    """Remove the specified user from all sharing.

    Parameters:
        user (str): MyPlexUser, username, email of the user to be removed.
    """
    account = self.user(user)
    # Friends are removed via FRIENDUPDATE; pending invites via REMOVEINVITE.
    template = self.FRIENDUPDATE if account.friend else self.REMOVEINVITE
    return self.query(template.format(userId=account.id), self._session.delete)
|
def ext_pillar(minion_id, pillar, # pylint: disable=W0613
               key=None, only=()):
    '''Read pillar data from Foreman via its API.

    :param minion_id: minion host name, used as the Foreman host lookup key
    :param pillar: existing pillar data (unused)
    :param key: if given, nest the result dict under this key
    :param only: if non-empty, restrict the result to these top-level keys
    :returns: dict of host data from Foreman (empty dict on any failure)
    '''
    url = __opts__['foreman.url']
    user = __opts__['foreman.user']
    password = __opts__['foreman.password']
    api = __opts__['foreman.api']
    verify = __opts__['foreman.verifyssl']
    certfile = __opts__['foreman.certfile']
    keyfile = __opts__['foreman.keyfile']
    cafile = __opts__['foreman.cafile']
    lookup_parameters = __opts__['foreman.lookup_parameters']
    log.info("Querying Foreman at %r for information for %r", url, minion_id)
    try:
        # Foreman API version 1 is currently not supported
        if api != 2:
            # BUG FIX: the original implicit string concatenation lacked a
            # space, logging "...please specifyversion 2...".
            log.error('Foreman API v2 is supported only, please specify '
                      'version 2 in your Salt master config')
            raise Exception
        headers = {'accept': 'version=' + six.text_type(api) + ',application/json'}
        # A custom CA bundle takes precedence over the plain verify flag.
        if verify and cafile is not None:
            verify = cafile
        resp = requests.get(
            url + '/hosts/' + minion_id,
            auth=(user, password),
            headers=headers,
            verify=verify,
            cert=(certfile, keyfile))
        result = resp.json()
        log.debug('Raw response of the Foreman request is %r', result)
        if lookup_parameters:
            # Flatten the name/value parameter records into a plain dict.
            parameters = dict()
            for param in result['all_parameters']:
                parameters.update({param['name']: param['value']})
            result['parameters'] = parameters
        if only:
            result = dict((k, result[k]) for k in only if k in result)
    except Exception:
        # Any failure (bad API version, HTTP/JSON errors) yields no pillar.
        log.exception('Could not fetch host data via Foreman API:')
        return {}
    if key:
        result = {key: result}
    return result
|
def plot_fit(self, intervals=True, **kwargs):
    """Plots the fit of the Gaussian process model to the data.

    Parameters
    ----------
    intervals : Boolean
        Whether to plot uncertainty intervals or not
    **kwargs :
        figsize : tuple, optional
            Figure size, defaults to (10, 7)

    Returns
    -------
    None (plots the fit of the function)
    """
    import matplotlib.pyplot as plt
    import seaborn as sns
    figsize = kwargs.get('figsize', (10, 7))
    # Dates are offset by max_lag: the first lags have no fitted value.
    date_index = self.index[self.max_lag:]
    expectation = self.expected_values(self.latent_variables.get_z_values())
    variance = self.variance_values(self.latent_variables.get_z_values())
    # ~95% intervals: 1.98 standard deviations from the posterior variance.
    upper = expectation + 1.98 * np.power(np.diag(variance), 0.5)
    lower = expectation - 1.98 * np.power(np.diag(variance), 0.5)
    plt.figure(figsize=figsize)
    # Panel 1: raw data (data are stored normalized; rescale back with
    # _norm_std/_norm_mean throughout).
    plt.subplot(2, 2, 1)
    plt.title(self.data_name + " Raw")
    plt.plot(date_index, self.data * self._norm_std + self._norm_mean, 'k')
    # Panel 2: raw data overlaid with the fitted expectation.
    plt.subplot(2, 2, 2)
    plt.title(self.data_name + " Raw and Expected")
    plt.plot(date_index, self.data * self._norm_std + self._norm_mean, 'k', alpha=0.2)
    plt.plot(date_index, self.expected_values(self.latent_variables.get_z_values()) * self._norm_std + self._norm_mean, 'b')
    # Panel 3: same overlay, optionally with uncertainty bands.
    plt.subplot(2, 2, 3)
    plt.title(self.data_name + " Raw and Expected (with intervals)")
    if intervals == True:
        plt.fill_between(date_index, lower * self._norm_std + self._norm_mean, upper * self._norm_std + self._norm_mean, alpha=0.2)
    plt.plot(date_index, self.data * self._norm_std + self._norm_mean, 'k', alpha=0.2)
    plt.plot(date_index, self.expected_values(self.latent_variables.get_z_values()) * self._norm_std + self._norm_mean, 'b')
    # Panel 4: fitted expectation alone, optionally with bands.
    plt.subplot(2, 2, 4)
    plt.title("Expected " + self.data_name + " (with intervals)")
    if intervals == True:
        plt.fill_between(date_index, lower * self._norm_std + self._norm_mean, upper * self._norm_std + self._norm_mean, alpha=0.2)
    plt.plot(date_index, self.expected_values(self.latent_variables.get_z_values()) * self._norm_std + self._norm_mean, 'b')
    plt.show()
|
def find_all(self, pattern):
    """Return the subset of this RcParams dictionary whose keys match,
    using :func:`re.search`, the given ``pattern``.

    Parameters
    ----------
    pattern : str
        pattern as suitable for re.compile

    Returns
    -------
    RcParams
        RcParams instance with entries that match the given `pattern`

    Notes
    -----
    Changes to the returned dictionary are (different from
    :meth:`find_and_replace`) *not* propagated to the parent RcParams
    dictionary.

    See Also
    --------
    find_and_replace
    """
    regex = re.compile(pattern)
    matched = RcParams()
    # Share the parent's default parameters with the filtered copy.
    matched.defaultParams = self.defaultParams
    matched.update((k, v) for k, v in self.items() if regex.search(k))
    return matched
|
def ReadUntilClose(self):
    """Yield packets until a Close packet is received."""
    while True:
        cmd, data = self.ReadUntil(b'CLSE', b'WRTE')
        if cmd == b'CLSE':
            # Acknowledge the close and stop streaming.
            self._Send(b'CLSE', arg0=self.local_id, arg1=self.remote_id)
            return
        if cmd == b'WRTE':
            yield data
            continue
        if cmd == b'FAIL':
            raise usb_exceptions.AdbCommandFailureException('Command failed.', data)
        # NOTE(review): InvalidCommandError receives the raw cmd/data as
        # extra args (the exception formats them itself).
        raise InvalidCommandError('Expected a WRITE or a CLOSE, got %s (%s)', cmd, data)
|
def set(self, mode=None):
    """Set the coloring mode.

    If enabled, some objects (like case run Status) are printed in color
    to easily spot failures, errors and so on.  By default the feature is
    enabled when the script is attached to a terminal.  Possible values::

        COLOR=0 ... COLOR_OFF .... coloring disabled
        COLOR=1 ... COLOR_ON ..... coloring enabled
        COLOR=2 ... COLOR_AUTO ... if terminal attached (default)

    The environment variable COLOR can be used to set up the coloring to
    the desired mode without modifying code.
    """
    if mode is None:
        # Detect from the environment only once.
        if self._mode is not None:
            return
        try:
            mode = int(os.environ["COLOR"])
        except StandardError:  # NOTE(review): Python 2 name (NameError on Py3)
            mode = COLOR_AUTO
    elif not 0 <= mode <= 2:
        raise RuntimeError("Invalid color mode '{0}'".format(mode))
    self._mode = mode
    log.debug("Coloring {0} ({1})".format(
        "enabled" if self.enabled() else "disabled", self.MODES[self._mode]))
|
def getAllReceivers(sender=Any, signal=Any):
    """Get list of all receivers from global tables.

    This gets all receivers which should receive the given signal from
    sender; each receiver is produced only once by the resulting
    generator.
    """
    # IMPROVED: use a real set instead of a dict-used-as-set, and avoid
    # shadowing the builtin `set` with a loop variable.
    seen = set()
    receiver_groups = (
        getReceivers(sender, signal),  # *this* signal from *this* sender
        getReceivers(sender, Any),     # *any* signal from *this* sender
        getReceivers(Any, signal),     # *this* signal from *any* sender
        getReceivers(Any, Any),        # *any* signal from *any* sender
    )
    for group in receiver_groups:
        for receiver in group:
            if receiver:  # filter out dead instance-method weakrefs
                try:
                    if receiver not in seen:
                        seen.add(receiver)
                        yield receiver
                except TypeError:
                    # dead weakrefs raise TypeError on hash...
                    pass
|
def _bytes_to_human ( self , B ) :
'''Return the given bytes as a human friendly KB , MB , GB , or TB string'''
|
KB = float ( 1024 )
MB = float ( KB ** 2 )
# 1,048,576
GB = float ( KB ** 3 )
# 1,073,741,824
TB = float ( KB ** 4 )
# 1,099,511,627,776
if B < KB :
return '{0} B' . format ( B )
B = float ( B )
if KB <= B < MB :
return '{0:.2f} KB' . format ( B / KB )
elif MB <= B < GB :
return '{0:.2f} MB' . format ( B / MB )
elif GB <= B < TB :
return '{0:.2f} GB' . format ( B / GB )
elif TB <= B :
return '{0:.2f} TB' . format ( B / TB )
|
def get_function(self):
    """Gets the ``Function`` for this authorization.

    return: (osid.authorization.Function) - the function
    raise:  OperationFailed - unable to complete request
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for osid.learning.Activity.get_objective
    if not bool(self._my_map['functionId']):
        raise errors.IllegalState('function empty')
    manager = self._get_provider_manager('AUTHORIZATION')
    if not manager.supports_function_lookup():
        raise errors.OperationFailed('Authorization does not support Function lookup')
    session = manager.get_function_lookup_session(proxy=getattr(self, "_proxy", None))
    # Search across all federated vaults, not just the local one.
    session.use_federated_vault_view()
    return session.get_function(self.get_function_id())
|
def parse(self):
    """Parses everything into a datastructure that looks like::

        result = [{
            'origin_filename': '',
            'result_filename': '',
            'origin_lines': [],   # all lines of the original file
            'result_lines': [],   # all lines of the newest file
            'added_lines': [],    # all lines added to the result file
            'removed_lines': [],  # all lines removed from the result file
        }, ...]
    """
    result = []
    z = None  # Entry currently being filled (one per file in the diff)
    before_line_number, after_line_number = 0, 0
    position = 0  # running position within the current file's diff body
    for line in self.diff_text.splitlines():
        # New File: a "diff ... a/<old> b/<new>" header starts a new Entry.
        match = re.search(r'diff .*a/(?P<origin_filename>.*) ' r'b/(?P<result_filename>.*)', line)
        if match is not None:
            if z is not None:
                result.append(z)
            z = Entry(match.group('origin_filename'), match.group('result_filename'))
            position = 0
            continue
        if self.should_skip_line(line):
            continue
        # Hunk header ("@@ -a,b +c,d @@") resets both line counters.
        header = diff_re.search(line)
        if header is not None:
            before_line_number = int(header.group('removed_start'))
            after_line_number = int(header.group('added_start'))
            position += 1
            continue
        # removed line: belongs to the origin file only
        if line.startswith('-'):
            z.new_removed(Line(before_line_number, position, line[1:]))
            z.new_origin(Line(before_line_number, position, line[1:]))
            before_line_number += 1
        # added line: belongs to the result file only
        elif line.startswith('+'):
            z.new_added(Line(after_line_number, position, line[1:]))
            z.new_result(Line(after_line_number, position, line[1:]))
            after_line_number += 1
        # untouched context line: present in both files
        else:
            z.new_origin(Line(before_line_number, position, line[1:]))
            z.new_result(Line(after_line_number, position, line[1:]))
            before_line_number += 1
            after_line_number += 1
        position += 1
    # Flush the final Entry (the loop only appends on the *next* header).
    if z is not None:
        result.append(z)
    return result
|
def create(fc_layers=None, dropout=None, pretrained=True):
    """Vel factory function.

    Returns a ModelFactory that instantiates a Resnet34 with the given
    fully-connected layers, dropout, and pretrained-weights settings.
    """
    return ModelFactory.generic(
        lambda **_: Resnet34(fc_layers, dropout, pretrained))
|
def _backtracking(problem, assignment, domains, variable_chooser, values_sorter, inference=True):
    '''Internal recursive backtracking algorithm.

    :param problem: CSP problem exposing ``variables`` and ``constraints``
    :param assignment: dict of variable -> chosen value built so far
    :param domains: dict of variable -> list of still-possible values
    :param variable_chooser: callable picking the next unassigned variable
    :param values_sorter: callable ordering candidate values for a variable
    :param inference: when True, prune domains with arc consistency (AC-3)
    :returns: a complete consistent assignment dict, or None if none exists
    '''
    from simpleai.search.arc import arc_consistency_3
    # Base case: every variable assigned -> solution found.
    if len(assignment) == len(problem.variables):
        return assignment
    pending = [v for v in problem.variables if v not in assignment]
    variable = variable_chooser(problem, pending, domains)
    values = values_sorter(problem, assignment, variable, domains)
    for value in values:
        # Deep-copy so sibling branches are not polluted by this choice.
        new_assignment = deepcopy(assignment)
        new_assignment[variable] = value
        if not _count_conflicts(problem, new_assignment):  # TODO on aima also checks if using fc
            new_domains = deepcopy(domains)
            new_domains[variable] = [value]
            # Optionally prune with AC-3 before recursing; a wiped-out
            # domain (AC-3 returning False) cuts this branch early.
            if not inference or arc_consistency_3(new_domains, problem.constraints):
                result = _backtracking(problem, new_assignment, new_domains, variable_chooser, values_sorter, inference=inference)
                if result:
                    return result
    return None
|
def quickstart(hosts, func, only_authenticate=False, **kwargs):
    """Like quickrun(), but automatically logs into the host before passing
    the connection to the callback function.

    :type hosts: Host|list[Host]
    :param hosts: A list of Host objects.
    :type func: function
    :param func: The callback function.
    :type only_authenticate: bool
    :param only_authenticate: don't authorize, just authenticate?
    :type kwargs: dict
    :param kwargs: Passed to the Exscript.Queue constructor.
    """
    # Pick the login decorator, then delegate to quickrun.
    login = autoauthenticate() if only_authenticate else autologin()
    quickrun(hosts, login(func), **kwargs)
|
def get_coordination_symmetry_measures(self, only_minimum=True, all_csms=True, optimization=None):
    """Returns the continuous symmetry measures of the current local geometry in a dictionary.

    :param only_minimum: keep only the best (minimal) CSM per geometry,
        together with its permutation/mapping data; otherwise store the
        full per-permutation results
    :param all_csms: also store the CSMs for every centering/scaling
        combination (only meaningful with ``only_minimum``)
    :param optimization: optimization scheme forwarded to
        ``coordination_geometry_symmetry_measures``
    :return: the continuous symmetry measures of the current local geometry
        in a dictionary, keyed by each geometry's ``mp_symbol``.
    """
    test_geometries = self.allcg.get_implemented_geometries(len(self.local_geometry.coords))
    if len(self.local_geometry.coords) == 1:
        if len(test_geometries) == 0:
            return {}
        # Single-site case: only "S:1" is possible and its CSM is exactly 0;
        # no meaningful scaling/rotation/translation exists.
        result_dict = {'S:1': {'csm': 0.0, 'indices': [0], 'algo': 'EXPLICIT', 'local2perfect_map': {0: 0}, 'perfect2local_map': {0: 0}, 'scaling_factor': None, 'rotation_matrix': None, 'translation_vector': None}}
        if all_csms:
            for csmtype in ['wocs_ctwocc', 'wocs_ctwcc', 'wocs_csc', 'wcs_ctwocc', 'wcs_ctwcc', 'wcs_csc']:
                result_dict['S:1']['csm_{}'.format(csmtype)] = 0.0
                result_dict['S:1']['scaling_factor_{}'.format(csmtype)] = None
                result_dict['S:1']['rotation_matrix_{}'.format(csmtype)] = None
                result_dict['S:1']['translation_vector_{}'.format(csmtype)] = None
        return result_dict
    result_dict = {}
    for geometry in test_geometries:
        # Build the perfect reference geometry with the same centering options
        # as the local geometry.
        self.perfect_geometry = AbstractGeometry.from_cg(cg=geometry, centering_type=self.centering_type, include_central_site_in_centroid=self.include_central_site_in_centroid)
        points_perfect = self.perfect_geometry.points_wcs_ctwcc()
        cgsm = self.coordination_geometry_symmetry_measures(geometry, points_perfect=points_perfect, optimization=optimization)
        result, permutations, algos, local2perfect_maps, perfect2local_maps = cgsm
        if only_minimum:
            if len(result) > 0:
                # Index of the permutation with the smallest symmetry measure.
                imin = np.argmin([rr['symmetry_measure'] for rr in result])
                if geometry.algorithms is not None:
                    algo = algos[imin]
                else:
                    algo = algos
                # Scaling factor is inverted and the rotation matrix inverted
                # to express the perfect->local transformation.
                result_dict[geometry.mp_symbol] = {'csm': result[imin]['symmetry_measure'], 'indices': permutations[imin], 'algo': algo, 'local2perfect_map': local2perfect_maps[imin], 'perfect2local_map': perfect2local_maps[imin], 'scaling_factor': 1.0 / result[imin]['scaling_factor'], 'rotation_matrix': np.linalg.inv(result[imin]['rotation_matrix']), 'translation_vector': result[imin]['translation_vector']}
                if all_csms:
                    self._update_results_all_csms(result_dict, permutations, imin, geometry)
        else:
            # Keep the full per-permutation results unreduced.
            result_dict[geometry.mp_symbol] = {'csm': result, 'indices': permutations, 'algo': algos, 'local2perfect_map': local2perfect_maps, 'perfect2local_map': perfect2local_maps}
    return result_dict
|
def to_dlpack_for_write(data):
    """Returns a reference view of NDArray that represents as DLManagedTensor until
    all previous read/write operations on the current array are finished.

    Parameters
    ----------
    data: NDArray
        input data.

    Returns
    -------
    PyCapsule (the pointer of DLManagedTensor)
        a reference view of NDArray that represents as DLManagedTensor.

    Examples
    --------
    >>> x = mx.nd.ones((2,3))
    >>> w = mx.nd.to_dlpack_for_write(x)
    >>> type(w)
    <class 'PyCapsule'>
    >>> u = mx.nd.from_dlpack(w)
    >>> u += 1
    >>> x
    [[2. 2. 2.]
     [2. 2. 2.]]
    <NDArray 2x3 @cpu(0)>
    """
    # Block until all queued reads/writes on `data` have completed, so the
    # exported tensor cannot race with pending MXNet engine operations.
    check_call(_LIB.MXNDArrayWaitToWrite(data.handle))
    dlpack = DLPackHandle()
    check_call(_LIB.MXNDArrayToDLPack(data.handle, ctypes.byref(dlpack)))
    # Wrap the DLManagedTensor pointer in a PyCapsule named "dltensor";
    # the deleter frees the tensor if the capsule is never consumed.
    return ctypes.pythonapi.PyCapsule_New(dlpack, _c_str_dltensor, _c_dlpack_deleter)
|
def experiments(auth, label=None, project=None, subject=None):
    '''Retrieve Experiment tuples for experiments returned by this function.

    Example:
        >>> import yaxil
        >>> auth = yaxil.XnatAuth(url='...', username='...', password='...')
        >>> yaxil.experiments(auth, 'AB1234C')
        Experiment(uri=u'/data/experiments/XNAT_E0001', label=u'AB1234C', id=u'XNAT_E0001',
                   project=u'MyProject', subject_id=u'XNAT_S0001', subject_label='ABC')

    :param auth: XNAT authentication
    :type auth: :mod:`yaxil.XnatAuth`
    :param label: XNAT Experiment label
    :type label: str
    :param project: XNAT Experiment Project
    :type project: str
    :param subject: YAXIL Subject (mutually exclusive with label/project)
    :type subject: :mod:`yaxil.Subject`
    :returns: Experiment objects
    :rtype: generator of :mod:`yaxil.Experiment`
    '''
    if subject and (label or project):
        raise ValueError('cannot provide subject with label or project')
    url = '{0}/data/experiments'.format(auth.url.rstrip('/'))
    logger.debug('issuing http request %s', url)
    # compile query string
    columns = ['ID', 'label', 'project', 'xnat:subjectassessordata/subject_id',
               'subject_label', 'insert_date']
    payload = {'columns': ','.join(columns)}
    if label:
        payload['label'] = label
    if project:
        payload['project'] = project
    if subject:
        # a Subject implies both its project and its XNAT subject id
        payload['project'] = subject.project
        payload['xnat:subjectassessordata/subject_id'] = subject.id
    # submit request
    r = requests.get(url, params=payload, auth=(auth.username, auth.password),
                     verify=CHECK_CERTIFICATE)
    # validate response
    if r.status_code != requests.codes.ok:
        raise AccessionError('response not ok ({0}) from {1}'.format(r.status_code, r.url))
    try:
        results = r.json()
        __quick_validate(results)
    except ResultSetError as e:
        # BUG FIX: Exception.message was removed in Python 3 (PEP 352);
        # formatting the exception itself works on both Python 2 and 3.
        raise ResultSetError('{0} from {1}'.format(e, r.url))
    results = results['ResultSet']
    if int(results['totalRecords']) == 0:
        raise NoExperimentsError('no records returned for {0}'.format(r.url))
    for item in results['Result']:
        yield Experiment(uri=item['URI'], id=item['ID'], project=item['project'],
                         label=item['label'], subject_id=item['subject_ID'],
                         subject_label=item['subject_label'],
                         archived_date=item['insert_date'])
|
def _calc_sampleset ( w1 , w2 , step , minimal ) :
"""Calculate sampleset for each model ."""
|
if minimal :
arr = [ w1 - step , w1 , w2 , w2 + step ]
else :
arr = np . arange ( w1 - step , w2 + step + step , step )
return arr
|
def trigger_on_off(request, trigger_id):
    """Toggle the status of a trigger, then re-render its row.

    :param request: request object
    :param trigger_id: the trigger ID to switch the status to True or False
    :type request: HttpRequest object
    :type trigger_id: int
    :return render
    :rtype HttpResponse
    """
    now = arrow.utcnow().to(settings.TIME_ZONE).format('YYYY-MM-DD HH:mm:ssZZ')
    trigger = get_object_or_404(TriggerService, pk=trigger_id)
    if trigger.status:
        # currently enabled -> disable it
        trigger.status = False
        title = 'disabled'
        title_trigger = _('Set this trigger on')
        btn = 'success'
    else:
        # currently disabled -> enable it and reset its trigger date so it
        # resumes from "now" rather than replaying missed events
        trigger.status = True
        trigger.date_triggered = now
        title = _('Edit your service')
        title_trigger = _('Set this trigger off')
        btn = 'primary'
    trigger.save()
    context = {'trigger': trigger,
               'title': title,
               'title_trigger': title_trigger,
               'btn': btn,
               'fire': settings.DJANGO_TH.get('fire', False)}
    return render(request, 'triggers/trigger_line.html', context)
|
def BuscarLocalidades(self, cod_prov, cod_localidad=None, consultar=True):
    "Return the locality, querying AFIP if it is not cached (internal use)"
    # If cod_localidad is not given, this is still useful to rebuild the cache.
    import wslpg_datos as datos
    if not str(cod_localidad) in datos.LOCALIDADES and consultar:
        # Cache miss: fetch every locality of the province from AFIP.
        d = self.ConsultarLocalidadesPorProvincia(cod_prov, sep=None)
        try:
            # Update the persistent dictionary (shelve).
            datos.LOCALIDADES.update(d)
        except Exception, e:
            print "EXCEPCION CAPTURADA", e
            # Catch errors caused by file permissions (or concurrency) and
            # fall back to replacing the in-memory mapping entirely.
            datos.LOCALIDADES = d
    # Empty string when the locality code is unknown.
    return datos.LOCALIDADES.get(str(cod_localidad), "")
|
def TransformerEncoder(vocab_size, num_classes=10, feature_depth=512,
                       feedforward_depth=2048, num_layers=6, num_heads=8,
                       dropout=0.1, max_len=2048, mode='train'):
    """Transformer encoder for sequence classification.

    Args:
      vocab_size: int: vocab size
      num_classes: how many classes on output
      feature_depth: int: depth of embedding
      feedforward_depth: int: depth of feed-forward layer
      num_layers: int: number of encoder layers
      num_heads: int: number of attention heads
      dropout: float: dropout rate (how much to drop out)
      max_len: int: maximum symbol length for positional encoding
      mode: str: 'train' or 'eval'

    Returns:
      the Transformer encoder layer.
    """
    embed = layers.Serial(
        layers.Embedding(feature_depth, vocab_size),
        layers.Dropout(rate=dropout, mode=mode),
        layers.PositionalEncoding(max_len=max_len),
    )
    encoder_stack = [
        EncoderLayer(feature_depth, feedforward_depth, num_heads, dropout, mode)
        for _ in range(num_layers)
    ]
    return layers.Serial(
        layers.Branch(),                            # branch input into two copies
        layers.Parallel(embed, layers.PaddingMask()),  # (embedding, mask)
        layers.Serial(*encoder_stack),
        layers.FirstBranch(),                       # drop the mask
        layers.LayerNorm(),
        layers.Mean(axis=1),                        # average over length
        layers.Dense(num_classes),
        layers.LogSoftmax(),
    )
|
async def message_field(self, msg, field, fvalue=None):
    """Dumps/loads a single message field.

    :param msg: message object whose field is (de)serialized
    :param field: field spec tuple ``(name, type, *params)``
    :param fvalue: explicit value for dump (optional)
    :return: None
    :raises helpers.ArchiveException: wrapping any underlying error, with the
        tracker positioned at the failing field
    """
    # ftype/params are unpacked for clarity; only fname is used directly here.
    fname, ftype, params = field[0], field[1], field[2:]
    try:
        self.tracker.push_field(fname)
        if self.writing:
            await self._dump_message_field(self.iobj, msg, field, fvalue=fvalue)
        else:
            await self._load_message_field(self.iobj, msg, field)
        # Pop only on success: on failure the tracker intentionally keeps the
        # field so the raised ArchiveException reports the full failing path.
        self.tracker.pop()
    except Exception as e:
        raise helpers.ArchiveException(e, tracker=self.tracker) from e
|
def _cast_empty_df_dtypes ( schema_fields , df ) :
"""Cast any columns in an empty dataframe to correct type .
In an empty dataframe , pandas cannot choose a dtype unless one is
explicitly provided . The _ bqschema _ to _ nullsafe _ dtypes ( ) function only
provides dtypes when the dtype safely handles null values . This means
that empty int64 and boolean columns are incorrectly classified as
` ` object ` ` ."""
|
if not df . empty :
raise ValueError ( "DataFrame must be empty in order to cast non-nullsafe dtypes" )
dtype_map = { "BOOLEAN" : bool , "INTEGER" : np . int64 }
for field in schema_fields :
column = str ( field [ "name" ] )
if field [ "mode" ] . upper ( ) == "REPEATED" :
continue
dtype = dtype_map . get ( field [ "type" ] . upper ( ) )
if dtype :
df [ column ] = df [ column ] . astype ( dtype )
return df
|
def _set_bpdu_mac(self, v, load=False):
    """Setter method for bpdu_mac, mapped from YANG variable
    /interface/port_channel/spanning_tree/bpdu_mac (enumeration).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_bpdu_mac is considered as a private method. Backends looking
    to populate this variable should do so via calling
    thisObj._set_bpdu_mac() directly.
    """
    # Unwrap values that carry their own YANG type constructor.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate the value against the enumeration's two legal control MACs
        # (generated pyangbind code; the long call below must stay intact).
        t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'0100.0ccc.cccd': {}, u'0304.0800.0700': {}},), is_leaf=True, yang_name="bpdu-mac", rest_name="bpdu-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure VLAN spanning-tree control MAC', u'cli-full-no': None, u'display-when': u'((/protocol/spanning-tree/pvst) or (/protocol/spanning-tree/rpvst))'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='enumeration', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured error payload describing the YANG type.
        raise ValueError({'error-string': """bpdu_mac must be of a type compatible with enumeration""", 'defined-type': "brocade-xstp:enumeration", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'0100.0ccc.cccd': {}, u'0304.0800.0700': {}},), is_leaf=True, yang_name="bpdu-mac", rest_name="bpdu-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure VLAN spanning-tree control MAC', u'cli-full-no': None, u'display-when': u'((/protocol/spanning-tree/pvst) or (/protocol/spanning-tree/rpvst))'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='enumeration', is_config=True)""",})
    self.__bpdu_mac = t
    # Notify the object of the change if a change hook is registered.
    if hasattr(self, '_set'):
        self._set()
|
def x_lower_limit(self, limit=None):
    """Returns or sets (if a value is provided) the value at which the
    x-axis should start. By default this is zero (unless there are
    negative values).

    :param limit: If given, the chart's x_lower_limit will be set to this.
    :raises TypeError: if the limit is not numeric.
    :raises ValueError: if you try to make the lower limit larger than the
        upper limit.
    """
    if limit is not None:
        # --- setter path ---
        if not is_numeric(limit):
            raise TypeError("lower x limit must be numeric, not '%s'" % str(limit))
        if limit >= self.largest_x():
            raise ValueError(
             "lower x limit must be less than upper limit (%s), not %s" % (
              str(self.largest_x()), str(limit)))
        self._x_lower_limit = limit
        return

    # --- getter path ---
    if self._x_lower_limit is not None:
        return self._x_lower_limit
    smallest = self.smallest_x()
    if smallest >= 0:
        # All data non-negative: start the axis at zero.
        return 0
    if smallest == self.largest_x():
        # Single negative value: widen by one so the point is visible.
        return int(smallest - 1)
    return smallest
|
async def remove_key(request: web.Request) -> web.Response:
    """Remove a key.

    DELETE /wifi/keys/:id

    -> 200 OK
    {message: 'Key file keyfile.pem deleted'}
    -> 404 if the key id is unknown
    """
    requested_hash = request.match_info['key_uuid']
    keys_dir = CONFIG['wifi_keys_dir']
    if requested_hash not in os.listdir(keys_dir):
        return web.json_response(
            {'message': 'No such key file {}'.format(requested_hash)},
            status=404)
    key_path = os.path.join(keys_dir, requested_hash)
    # The directory holds exactly the key file; grab its name for the reply
    # before deleting the whole directory.
    name = os.listdir(key_path)[0]
    shutil.rmtree(key_path)
    return web.json_response(
        {'message': 'Key file {} deleted'.format(name)},
        status=200)
|
def mcc(x, axis=0, autocorrect=False):
    """Matthews correlation.

    Parameters
    ----------
    x : ndarray
        dataset of binary [0,1] values
    axis : int, optional
        Variables as columns is the default (axis=0). If variables
        are in the rows use axis=1
    autocorrect : bool, optional
        If all predictions are True or all are False, then MCC
        returns np.NaN. Set autocorrect=True to return a 0.0
        correlation instead.

    Returns
    -------
    r : ndarray
        Matthews correlation
    p : ndarray
        p-values of the Chi^2 test statistics

    Raises
    ------
    ValueError
        If fewer than 2 variables are provided. (ValueError subclasses
        Exception, so callers catching the previous broad Exception
        still work.)

    Notes
    -----
    (1) We cannot directly transform the Chi^2 test statistics to the
        Matthews correlation because the relationship is
        |r| = sqrt(chi2 / n), i.e. chi2 = r * r * n.
    (2) The sign would be missing. Therefore, as a rule of thumb, if you
        want to optimize ABS(r_mcc) then just use Chi^2 / n directly.
    """
    # BUG FIX: `axis is not 0` identity-compared an int literal, which only
    # worked via CPython's small-int cache and emits a SyntaxWarning on
    # Python >= 3.8. Use an ordinary equality comparison.
    if axis != 0:
        x = x.T
    # read dimensions
    n, c = x.shape
    # check if enough variables provided
    if c < 2:
        raise ValueError("Only " + str(c) + " variables provided. Min. 2 required.")
    # allocate output matrices (diagonal of r stays 1, of p stays 0)
    r = np.ones((c, c))
    p = np.zeros((c, c))
    # compute each (i, j)-th correlation from the pairwise confusion matrix
    for i in range(0, c):
        for j in range(i + 1, c):
            cm = confusion(x[:, i], x[:, j])
            r[i, j] = confusion_to_mcc(cm)
            r[j, i] = r[i, j]
            # |r| = sqrt(chi2/n)  =>  chi2 = r*r*n  (see Notes)
            p[i, j] = 1 - scipy.stats.chi2.cdf(r[i, j] * r[i, j] * n, 1)
            p[j, i] = p[i, j]
    # replace NaN with 0.0 for degenerate (all-True/all-False) columns
    if autocorrect:
        r = np.nan_to_num(r)
    return r, p
|
def close(self):
    """Close this pipe object. Future calls to `read` after the buffer
    has been emptied will return immediately with an empty string."""
    # Take the pipe lock for the whole state change so readers observe a
    # consistent closed flag and are all woken exactly once.
    with self._lock:
        self._closed = True
        self._cv.notifyAll()
        if self._event is not None:
            self._event.set()
|
def p_always_ff(self, p):
    # NOTE: the string below is read by PLY as this rule's grammar
    # production — it must not be reworded or moved.
    'always_ff : ALWAYS_FF senslist always_statement'
    # p[2] = sensitivity list, p[3] = always-body statement.
    p[0] = AlwaysFF(p[2], p[3], lineno=p.lineno(1))
|
def get_face_fun_on(self):
    """Query the device's FaceFunOn option (extended face format support).

    Returns the option as an int, or None when the device reports an error.
    """
    cmd_response = self.__send_command(const.CMD_OPTIONS_RRQ,
                                       b'FaceFunOn\x00', 1024)
    if not cmd_response.get('status'):
        self._clear_error(b'FaceFunOn\x00')
        return None
    # Reply payload looks like b'FaceFunOn=<value>\x00...'; take the part
    # after '=' up to the first NUL terminator.
    raw_value = self.__data.split(b'=', 1)[-1].split(b'\x00')[0]
    return safe_cast(raw_value, int, 0) if raw_value else 0
|
def resolve(self, var, context):
    """Resolve a template variable out of context, unless it is a quoted
    string literal (in which case the unquoted text is returned as-is)."""
    if var is None:
        return None
    is_quoted = var[0] in ('"', "'") and var[-1] == var[0]
    if is_quoted:
        # Strip the surrounding quotes and treat as a literal.
        return var[1:-1]
    return template.Variable(var).resolve(context)
|
def get_commission_coeff(self, code):
    """Return the larger of the per-amount and per-volume commission
    coefficients for *code*.

    NOTE: whether the fee is percentage-based or charged per lot cannot be
    distinguished here; callers must decide for themselves.
    """
    per_amount = self.get_code(code).get('commission_coeff_peramount')
    per_volume = self.get_code(code).get('commission_coeff_pervol')
    return max(per_amount, per_volume)
|
def check_keystore_json(jsondata: Dict) -> bool:
    """Check if ``jsondata`` has the structure of a keystore file version 3.

    Note that this test is not complete, e.g. it doesn't check key
    derivation or cipher parameters.
    Copied from https://github.com/vbuterin/pybitcointools

    Args:
        jsondata: Dictionary containing the data from the json file

    Returns:
        `True` if the data appears to be valid, otherwise `False`
    """
    # Either capitalisation of the crypto section is accepted.
    if 'crypto' not in jsondata and 'Crypto' not in jsondata:
        return False
    # Must be explicitly a version-3 keystore.
    if jsondata.get('version') != 3:
        return False
    crypto = jsondata.get('crypto', jsondata.get('Crypto'))
    # All four crypto sub-sections must be present.
    return all(key in crypto for key in ('cipher', 'ciphertext', 'kdf', 'mac'))
|
def write_table(table, path, column_styles=None, cell_styles=None):
    """Export a table in the desired format (CSV or XLSX).

    The file extension must be ".csv" or ".xlsx"; it determines which
    writer is used.

    Args:
        table (list of dicts): Table to export. All dicts must share the
            same keys.
        path (str): Path of the output CSV or XLSX file.
        column_styles: Optional styling, forwarded to the XLSX writer.
        cell_styles: Optional styling, forwarded to the XLSX writer.

    Raises:
        ValueError: if the rows don't share keys or the suffix is unknown.
    """
    # NOTE: assertion and error messages are user-facing in Spanish and are
    # kept unchanged.
    assert isinstance(path, string_types), "`path` debe ser un string"
    assert isinstance(table, list), "`table` debe ser una lista de dicts"
    # If the table is empty, write nothing at all.
    if len(table) == 0:
        logger.warning("Tabla vacia: no se genera ninguna archivo.")
        return
    # Only knows how to write lists of dicts with matching keys.
    if not helpers.is_list_of_matching_dicts(table):
        raise ValueError("""
La lista ingresada no esta formada por diccionarios con las mismas claves.""")
    # Deduce the file format from `path` and dispatch accordingly.
    suffix = path.split(".")[-1]
    if suffix == "csv":
        return _write_csv_table(table, path)
    elif suffix == "xlsx":
        return _write_xlsx_table(table, path, column_styles, cell_styles)
    else:
        raise ValueError("""
{} no es un sufijo reconocido. Pruebe con .csv o.xlsx""".format(suffix))
|
def drop_modifiers(sentence_str):
    """Given a string, drop the modifiers (tokens whose dependency tag ends
    in "mod") and return a string without them."""
    tdoc = textacy.Doc(sentence_str, lang='en_core_web_lg')
    new_sent = tdoc.text
    # Placeholder character assumed absent from English input. Each modifier
    # is overwritten by a SAME-LENGTH run of this character so that every
    # later token's character offset (tag.idx) stays valid during the loop.
    unusual_char = '形'
    for tag in tdoc:
        if tag.dep_.endswith('mod'):  # Replace the tag
            new_sent = new_sent[:tag.idx] + unusual_char * len(tag.text) + new_sent[tag.idx + len(tag.text):]
    # Now remove all placeholders at once and collapse leftover whitespace.
    new_sent = new_sent.replace(unusual_char, '')
    new_sent = textacy.preprocess.normalize_whitespace(new_sent)
    return new_sent
|
def inflate_dtype(arr, names):
    """Create a structured dtype for a 2d ndarray with unstructured dtype:
    one field per name, all fields sharing the array's scalar dtype.
    Arrays that already have a structured dtype keep it unchanged."""
    arr = np.asanyarray(arr)
    if has_structured_dt(arr):
        return arr.dtype
    scalar_dt = arr.dtype
    return np.dtype([(name, scalar_dt) for name in names])
|
def run(self):
    """Run the simulation: place components on a SpiNNaker machine, load
    configuration/kernels/routes, run for ``self.length`` milliseconds and
    read the results back."""
    # Define the resource requirements of each component in the simulation.
    vertices_resources = {
        # Every component runs on exactly one core and consumes a certain
        # amount of SDRAM to hold configuration data.
        component: {Cores: 1, SDRAM: component._get_config_size()}
        for component in self._components
    }
    # Work out what SpiNNaker application needs to be loaded for each
    # component
    vertices_applications = {component: component._get_kernel()
                             for component in self._components}
    # Convert the Wire objects into Rig Net objects and create a lookup
    # from Net to the (key, mask) to use.
    net_keys = {Net(wire.source, wire.sinks): (wire.routing_key, 0xFFFFFFFF)
                for wire in self._wires}
    nets = list(net_keys)
    # Boot the SpiNNaker machine and interrogate it to determine what
    # resources (e.g. cores, SDRAM etc.) are available.
    mc = MachineController(self._hostname)
    mc.boot()
    system_info = mc.get_system_info()
    # Automatically chose which chips and cores to use for each component
    # and generate routing tables.
    placements, allocations, application_map, routing_tables = place_and_route_wrapper(
        vertices_resources, vertices_applications, nets, net_keys, system_info)
    with mc.application():
        # Allocate memory for configuration data, tagged by core number.
        memory_allocations = sdram_alloc_for_vertices(mc, placements, allocations)
        # Load the configuration data for all components
        for component, memory in memory_allocations.items():
            component._write_config(memory)
        # Load all routing tables
        mc.load_routing_tables(routing_tables)
        # Load all SpiNNaker application kernels
        mc.load_application(application_map)
        # Wait for all cores to reach the 'sync0' barrier
        mc.wait_for_cores_to_reach_state("sync0", len(self._components))
        # Send the 'sync0' signal to start execution and wait for the
        # simulation to finish (self.length is in milliseconds).
        mc.send_signal("sync0")
        time.sleep(self.length * 0.001)
        mc.wait_for_cores_to_reach_state("exit", len(self._components))
        # Retrieve result data
        for component, memory in memory_allocations.items():
            component._read_results(memory)
|
def widen(self):
    """Increase the interval size: scale the half-duration by the x zoom
    coefficient and re-centre the interval on the current time."""
    half = self.half_duration * self.scaling_coeff_x
    centre = self.time
    self.set_interval((centre - half, centre + half))
|
def _get_cpu_info_from_proc_cpuinfo():
    '''Returns the CPU info gathered from /proc/cpuinfo.
    Returns {} if /proc/cpuinfo is not found or cannot be parsed.'''
    try:
        # Just return {} if there is no cpuinfo
        if not DataSource.has_proc_cpuinfo():
            return {}
        returncode, output = DataSource.cat_proc_cpuinfo()
        if returncode != 0:
            return {}
        # Various fields (field names differ across kernels/architectures,
        # hence the multiple aliases per call)
        vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
        processor_brand = _get_field(True, output, None, None, 'model name', 'cpu', 'processor')
        cache_size = _get_field(False, output, None, '', 'cache size')
        stepping = _get_field(False, output, int, 0, 'stepping')
        model = _get_field(False, output, int, 0, 'model')
        family = _get_field(False, output, int, 0, 'cpu family')
        hardware = _get_field(False, output, None, '', 'Hardware')
        # Flags
        flags = _get_field(False, output, None, None, 'flags', 'Features')
        if flags:
            flags = flags.split()
            flags.sort()
        # Convert from MHz string to Hz
        hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock')
        hz_actual = hz_actual.lower().rstrip('mhz').strip()
        hz_actual = _to_decimal_string(hz_actual)
        # Convert from GHz/MHz string to Hz (parsed out of the brand string)
        hz_advertised, scale = (None, 0)
        try:
            hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
        except Exception:
            pass
        info = {
            'hardware_raw': hardware,
            'brand_raw': processor_brand,
            'l3_cache_size': _to_friendly_bytes(cache_size),
            'flags': flags,
            'vendor_id_raw': vendor_id,
            'stepping': stepping,
            'model': model,
            'family': family,
        }
        # Make the Hz the same for actual and advertised if missing any
        if not hz_advertised or hz_advertised == '0.0':
            hz_advertised = hz_actual
            scale = 6  # cpu MHz field is in MHz -> 10^6 scale
        elif not hz_actual or hz_actual == '0.0':
            hz_actual = hz_advertised
        # Add the Hz if there is one
        if _hz_short_to_full(hz_advertised, scale) > (0, 0):
            info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
            info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
        if _hz_short_to_full(hz_actual, scale) > (0, 0):
            info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, 6)
            info['hz_actual'] = _hz_short_to_full(hz_actual, 6)
        # Drop empty values so callers can merge dicts without clobbering.
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        # raise # NOTE: To have this throw on error, uncomment this line
        # Deliberate broad catch: any parsing failure degrades to "no info".
        return {}
|
def _parse_path(path_args):
    """Parses positional arguments into key path with kinds and IDs.

    :type path_args: tuple
    :param path_args: A tuple from positional arguments. Should be an
                      alternating list of kinds (string) and ID/name
                      parts (int or string).

    :rtype: :class:`list` of :class:`dict`
    :returns: A list of key parts with kind and ID or name set.

    :raises: :class:`ValueError` if there are no ``path_args``, if one of
             the kinds is not a string or if one of the IDs/names is not
             a string or an integer.
    """
    if len(path_args) == 0:
        raise ValueError("Key path must not be empty.")

    kinds = path_args[::2]
    ids_or_names = path_args[1::2]
    # Dummy sentinel value used to pad an odd-length (partial) key path.
    partial_ending = object()
    if len(path_args) % 2 == 1:
        ids_or_names += (partial_ending,)

    result = []
    for kind, id_or_name in zip(kinds, ids_or_names):
        part = {}
        if not isinstance(kind, six.string_types):
            raise ValueError(kind, "Kind was not a string.")
        part["kind"] = kind
        if isinstance(id_or_name, six.string_types):
            part["name"] = id_or_name
        elif isinstance(id_or_name, six.integer_types):
            part["id"] = id_or_name
        elif id_or_name is not partial_ending:
            raise ValueError(id_or_name, "ID/name was not a string or integer.")
        result.append(part)
    return result
|
def samtools_view(self, file_name, param, postpend=""):
    """Run samtools view, with flexible parameters and post-processing.

    This is used internally to implement the various count_reads functions.

    :param str file_name: file_name
    :param str param: String of parameters to pass to samtools view
    :param str postpend: String to append to the samtools command;
        useful to add cut, sort, wc operations to the samtools view output.
    :return: raw stdout of the shell pipeline (bytes)
    """
    # NOTE(review): shell=True with string-interpolated arguments — `param`,
    # `file_name` and `postpend` must never contain untrusted input
    # (shell-injection risk). shell=True is required here so `postpend`
    # can add pipes such as `| wc -l`.
    cmd = "{} view {} {} {}".format(self.tools.samtools, param, file_name, postpend)
    return subprocess.check_output(cmd, shell=True)
|
async def sunion(self, keys, *args):
    """Return the union of the sets specified by ``keys``.

    ``keys`` may be a single key, an iterable of keys, or the first of
    several keys passed positionally via ``*args``.
    """
    args = list_or_args(keys, args)
    return await self.execute_command('SUNION', *args)
|
def get_sampletypes(self):
    """Returns the available (active) SampleType objects of the system,
    sorted by title ascending."""
    query = {
        "portal_type": "SampleType",
        "sort_on": "sortable_title",
        "sort_order": "ascending",
        "is_active": True,
    }
    results = api.search(query, "bika_setup_catalog")
    # NOTE: Python 2 `map` — returns a list of full (awakened) objects.
    return map(api.get_object, results)
|
def remove(self, rel_path, propagate=False):
    '''Delete the file from the cache, and (optionally) from the upstream.'''
    # Drop the database record first, then the file on disk.
    cursor = self.database.cursor()
    cursor.execute("DELETE FROM files WHERE path = ?", (rel_path,))
    cached_file = os.path.join(self.cache_dir, rel_path)
    if os.path.exists(cached_file):
        os.remove(cached_file)
    self.database.commit()
    # Cascade the deletion upstream only when explicitly requested.
    if self.upstream and propagate:
        self.upstream.remove(rel_path, propagate)
|
async def initialize(self):
    '''Initialize static data like images and flavors and set it as object
    properties: bidirectional id<->name maps plus per-image details.'''
    flavors = await self._list_flavors()
    images = await self._list_images()
    # Bidirectional maps allow lookup by either id or name.
    self.flavors_map = bidict()
    self.images_map = bidict()
    self.images_details = {}
    for flavor in flavors:
        # Overwrite on duplicate keys/values: last listing wins.
        self.flavors_map.put(flavor['id'], flavor['name'], on_dup_key='OVERWRITE', on_dup_val='OVERWRITE')
    for image in images:
        # @TODO filters:
        # @TODO filtering by owner
        # if hasattr(image, 'owner_id') and image.owner_id in self.config['image_owner_ids']:
        # @TODO enable filtering by tag
        # if 'lastest' in image.tags:
        self.images_details[image['id']] = {'name': image['name'], 'created_at': image['created_at'], 'latest': 'latest' in image['tags']}
        self.images_map.put(image['id'], image['name'], on_dup_key='OVERWRITE', on_dup_val='OVERWRITE')
|
def validate_read(self, kwargs):
    """Validate read keyword arguments; ``start``/``stop`` slicing is not
    supported for fixed Sparse storage."""
    kwargs = super().validate_read(kwargs)
    if not {'start', 'stop'}.isdisjoint(kwargs):
        raise NotImplementedError("start and/or stop are not supported "
                                  "in fixed Sparse reading")
    return kwargs
|
def __gen_primary_text_file(self):
    """Generate the PAULA file that contains the primary text of the
    document graph. (PAULA documents can have more than one primary text,
    but discoursegraphs only works with documents that are based on exactly
    one primary text.)

    Returns the paula_id of the generated file.

    Example
    -------
    <?xml version="1.0" standalone="no"?>
    <!DOCTYPE paula SYSTEM "paula_text.dtd">
    <paula version="1.1">
        <header paula_id="maz-1423.text" type="text"/>
        <body>Zum Angewoehnen ...</body>
    </paula>
    """
    # paula_id follows the "<corpus>.<doc>.text" naming convention.
    paula_id = '{0}.{1}.text'.format(self.corpus_name, self.name)
    E, tree = gen_paula_etree(paula_id)
    # The <body> element holds the full primary text of the graph.
    tree.append(E.body(get_text(self.dg)))
    self.files[paula_id] = tree
    self.file2dtd[paula_id] = PaulaDTDs.text
    return paula_id
|
def _check_arg ( self , arg ) :
"""Check individual argument ( list / tuple / string / etc )"""
|
if isinstance ( arg , list ) :
return self . _get_dependencies_from_args ( arg )
elif isinstance ( arg , dict ) :
return self . _get_dependencies_from_kwargs ( arg )
if not is_dependency_name ( arg ) :
return set ( )
return set ( [ arg [ 1 : ] ] )
|
def addVariantSet(self):
    """Adds a new VariantSet into this repo from the VCF files given on the
    command line, resolving index files, the reference set and (optionally)
    annotation sets, then commits everything as one atomic repo update."""
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    dataUrls = self._args.dataFiles
    name = self._args.name
    if len(dataUrls) == 1:
        # A single path: the name can be inferred, and a directory is
        # expanded into its compressed VCF files.
        if self._args.name is None:
            name = getNameFromPath(dataUrls[0])
        if os.path.isdir(dataUrls[0]):
            # Read in the VCF files from the directory.
            # TODO support uncompressed VCF and BCF files
            vcfDir = dataUrls[0]
            pattern = os.path.join(vcfDir, "*.vcf.gz")
            dataUrls = glob.glob(pattern)
            if len(dataUrls) == 0:
                raise exceptions.RepoManagerException("Cannot find any VCF files in the directory " "'{}'.".format(vcfDir))
            dataUrls[0] = self._getFilePath(dataUrls[0], self._args.relativePath)
    elif self._args.name is None:
        raise exceptions.RepoManagerException("Cannot infer the intended name of the VariantSet when " "more than one VCF file is provided. Please provide a " "name argument using --name.")
    parsed = urlparse.urlparse(dataUrls[0])
    # Local paths (anything that is not http/ftp) may be made relative.
    if parsed.scheme not in ['http', 'ftp']:
        dataUrls = map(lambda url: self._getFilePath(url, self._args.relativePath), dataUrls)
    # Now, get the index files for the data files that we've now obtained.
    indexFiles = self._args.indexFiles
    if indexFiles is None:
        # First check if all the paths exist locally, as they must
        # if we are making a default index path.
        for dataUrl in dataUrls:
            if not os.path.exists(dataUrl):
                raise exceptions.MissingIndexException("Cannot find file '{}'. All variant files must be " "stored locally if the default index location is " "used. If you are trying to create a VariantSet " "based on remote URLs, please download the index " "files to the local file system and provide them " "with the --indexFiles argument".format(dataUrl))
        # We assume that the indexes are made by adding .tbi
        indexSuffix = ".tbi"
        # TODO support BCF input properly here by adding .csi
        indexFiles = [filename + indexSuffix for filename in dataUrls]
    indexFiles = map(lambda url: self._getFilePath(url, self._args.relativePath), indexFiles)
    variantSet = variants.HtslibVariantSet(dataset, name)
    variantSet.populateFromFile(dataUrls, indexFiles)
    # Get the reference set that is associated with the variant set.
    referenceSetName = self._args.referenceSetName
    if referenceSetName is None:
        # Try to find a reference set name from the VCF header.
        referenceSetName = variantSet.getVcfHeaderReferenceSetName()
    if referenceSetName is None:
        raise exceptions.RepoManagerException("Cannot infer the ReferenceSet from the VCF header. Please " "specify the ReferenceSet to associate with this " "VariantSet using the --referenceSetName option")
    referenceSet = self._repo.getReferenceSetByName(referenceSetName)
    variantSet.setReferenceSet(referenceSet)
    variantSet.setAttributes(json.loads(self._args.attributes))
    # Now check for annotations
    annotationSets = []
    if variantSet.isAnnotated() and self._args.addAnnotationSets:
        ontologyName = self._args.ontologyName
        if ontologyName is None:
            raise exceptions.RepoManagerException("A sequence ontology name must be provided")
        ontology = self._repo.getOntologyByName(ontologyName)
        self._checkSequenceOntology(ontology)
        for annotationSet in variantSet.getVariantAnnotationSets():
            annotationSet.setOntology(ontology)
            annotationSets.append(annotationSet)
    # Add the annotation sets and the variant set as an atomic update
    def updateRepo():
        self._repo.insertVariantSet(variantSet)
        for annotationSet in annotationSets:
            self._repo.insertVariantAnnotationSet(annotationSet)
    self._updateRepo(updateRepo)
|
def _cacheAllJobs(self):
    """Download every job in the current job store into self._jobCache,
    keyed by job store ID."""
    logger.debug('Caching all jobs in job store')
    cache = {}
    for jobGraph in self._jobStore.jobs():
        cache[jobGraph.jobStoreID] = jobGraph
    self._jobCache = cache
    logger.debug('{} jobs downloaded.'.format(len(self._jobCache)))
|
def tuning_ranges(self):
    """A dictionary describing the ranges of all tuned hyperparameters.
    The keys are the hyperparameter names and the values are the range
    descriptions, flattened across all parameter-range categories."""
    param_ranges = self.description()['HyperParameterTuningJobConfig']['ParameterRanges']
    flattened = {}
    for range_list in param_ranges.values():
        for param in range_list:
            flattened[param['Name']] = param
    return flattened
|
def _read_para_echo_response_unsigned ( self , code , cbit , clen , * , desc , length , version ) :
"""Read HIP ECHO _ RESPONSE _ UNSIGNED parameter .
Structure of HIP ECHO _ RESPONSE _ UNSIGNED parameter [ RFC 7401 ] :
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
| Type | Length |
| Opaque data ( variable length ) |
Octets Bits Name Description
0 0 echo _ response _ unsigned . type Parameter Type
1 15 echo _ response _ unsigned . critical Critical Bit
2 16 echo _ response _ unsigned . length Length of Contents
4 32 echo _ response _ unsigned . data Opaque Data"""
|
_data = self . _read_fileng ( clen )
echo_response_unsigned = dict ( type = desc , critical = cbit , length = clen , data = _data , )
_plen = length - clen
if _plen :
self . _read_fileng ( _plen )
return echo_response_unsigned
|
def add_node_to_network(self, node, network):
    """Add node to the chain and receive transmissions.

    The node joins the network; if it has an upstream neighbor, that
    neighbor transmits to it, and the node then processes what it received.
    """
    network.add_node(node)
    parents = node.neighbors(direction="from")
    if parents:
        parents[0].transmit()
    node.receive()
|
def execute(self, slave_id, route_map):
    """Execute the Modbus function registered for a route.

    :param slave_id: Slave id.
    :param route_map: Instance of modbus.route.Map.
    :raises IllegalDataAddressError: If no endpoint is registered for one
        of the addresses covered by this request.
    """
    for index, value in enumerate(self.values):
        address = self.starting_address + index
        endpoint = route_map.match(slave_id, self.function_code, address)
        # route_map.match() returns None when no endpoint is registered for
        # this address.  Check for that explicitly rather than catching the
        # TypeError raised by calling None: the old try/except also converted
        # genuine TypeErrors raised *inside* an endpoint into address errors,
        # masking real bugs.
        if endpoint is None:
            raise IllegalDataAddressError()
        endpoint(slave_id=slave_id, address=address, value=value,
                 function_code=self.function_code)
|
def audio_stream_capture(self, httptype=None, channel=None, path_file=None):
    """Capture an audio stream from the device.

    :param httptype: type string (singlepart or multipart).
        singlepart: HTTP content is a continuous flow of audio packets.
        multipart: HTTP content type is multipart/x-mixed-replace, and
        each audio packet ends with a boundary string.
    :param channel: audio channel (integer).
    :param path_file: optional path to an output file; when given, the raw
        stream is also copied to that file.
    :return: the raw stream object from the device response.
    :raises RuntimeError: if either ``httptype`` or ``channel`` is missing.
    """
    # Both parameters are mandatory.  The original guard used ``and``, so a
    # single missing argument slipped through and "None" was interpolated
    # into the request URL.
    if httptype is None or channel is None:
        raise RuntimeError("Requires httptype and channel")
    ret = self.command(
        'audio.cgi?action=getAudio&httptype={0}&channel={1}'.format(httptype, channel))
    if path_file:
        with open(path_file, 'wb') as out_file:
            shutil.copyfileobj(ret.raw, out_file)
    return ret.raw
|
def _meet(intervals_hier, labels_hier, frame_size):
    '''Compute the (sparse) least-common-ancestor (LCA) matrix for a
    hierarchical segmentation.

    For any pair of frames ``(s, t)``, the LCA is the deepest level in
    the hierarchy such that ``(s, t)`` are contained within a single
    segment at that level.

    Parameters
    ----------
    intervals_hier : list of ndarray
        An ordered list of segment interval arrays.
        The list is assumed to be ordered by increasing specificity (depth).
    labels_hier : list of list of str
        ``labels_hier[i]`` contains the segment labels for the
        ``i``th layer of the annotations
    frame_size : number
        The length of the sample frames (in seconds)

    Returns
    -------
    meet_matrix : scipy.sparse.csr_matrix
        A sparse matrix such that ``meet_matrix[i, j]`` contains the depth
        of the deepest segment label containing both ``i`` and ``j``.
    '''
    frame_size = float(frame_size)
    # Figure out how many frames we need to cover the full time span of the
    # hierarchy, after snapping the boundaries to the frame grid.
    n_start, n_end = _hierarchy_bounds(intervals_hier)
    n = int((_round(n_end, frame_size) - _round(n_start, frame_size)) / frame_size)
    # Initialize the meet matrix.  LIL format supports the efficient
    # incremental slice-assignments below; converted to CSR on return.
    meet_matrix = scipy.sparse.lil_matrix((n, n), dtype=np.uint8)
    # Levels are enumerated from 1 so that depth 0 means "never meet".
    for level, (intervals, labels) in enumerate(zip(intervals_hier, labels_hier), 1):
        # Encode the labels at this level as integer ids
        lab_enc = util.index_labels(labels)[0]
        # Find unique agreements: upper triangle of the pairwise
        # label-equality matrix (segment pairs sharing a label)
        int_agree = np.triu(np.equal.outer(lab_enc, lab_enc))
        # Map intervals to frame indices
        int_frames = (_round(intervals, frame_size) / frame_size).astype(int)
        # For each intervals i, j where labels agree, update the meet matrix.
        # Deeper levels overwrite shallower ones, so the final value is the
        # deepest level at which the two frames co-occur in one label.
        for (seg_i, seg_j) in zip(*np.where(int_agree)):
            idx_i = slice(*list(int_frames[seg_i]))
            idx_j = slice(*list(int_frames[seg_j]))
            meet_matrix[idx_i, idx_j] = level
            # Only the upper triangle was computed; mirror off-diagonal
            # agreements to keep the matrix symmetric.
            if seg_i != seg_j:
                meet_matrix[idx_j, idx_i] = level
    return scipy.sparse.csr_matrix(meet_matrix)
|
def RelaxNGSetSchema(self, schema):
    """Use RelaxNG to validate the document as it is processed.

    Activation is only possible before the first Read(). If
    @schema is None, then RelaxNG validation is deactivated.
    The @schema should not be freed until the reader is
    deallocated or its use has been deactivated.
    """
    # Unwrap the Python-level schema object to the underlying C object,
    # passing None through to disable validation.
    schema__o = None if schema is None else schema._o
    return libxml2mod.xmlTextReaderRelaxNGSetSchema(self._o, schema__o)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.