signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def _clean_bindings(self, bindings):
    """Remove all of the expression objects from the given bindings.

    :param bindings: The bindings to clean
    :type bindings: list
    :return: The cleaned bindings (only non-expression values kept)
    :rtype: list
    """
    return [binding for binding in bindings
            if not isinstance(binding, QueryExpression)]
def spherical_histogram(data=None, radial_bins="numpy", theta_bins=16, phi_bins=16,
                        transformed=False, *args, **kwargs):
    """Facade construction function for the SphericalHistogram.

    :param data: array-like input interpreted as (r, theta, phi) triples
        (or cartesian, when ``transformed`` is False — handled by
        ``_prepare_data``).
    :param radial_bins: binning specification for the radial axis.
    :param theta_bins: int (bin count) or explicit bin edges for theta.
    :param phi_bins: int (bin count) or explicit bin edges for phi.
    :param transformed: whether data is already in spherical coordinates.

    Recognized keyword arguments include ``dropna``, ``weights``,
    ``theta_range``, ``phi_range`` and ``range`` (a per-axis sequence).
    """
    dropna = kwargs.pop("dropna", True)
    data = _prepare_data(data, transformed=transformed, klass=SphericalHistogram,
                         dropna=dropna)

    if isinstance(theta_bins, int):
        # Default polar angle range is [0, pi].
        theta_range = (0, np.pi)
        # BUG FIX: the original tested membership in the *string* "kwargs"
        # ('"theta_range" in "kwargs"'), which is always False, so any
        # user-supplied theta_range/range was silently ignored.
        if "theta_range" in kwargs:
            theta_range = kwargs.pop("theta_range")
        elif "range" in kwargs:
            theta_range = kwargs["range"][1]
        theta_range = list(theta_range) + [theta_bins + 1]
        theta_bins = np.linspace(*theta_range)

    if isinstance(phi_bins, int):
        # Default azimuthal range is [0, 2*pi].
        phi_range = (0, 2 * np.pi)
        if "phi_range" in kwargs:
            phi_range = kwargs.pop("phi_range")
        elif "range" in kwargs:
            phi_range = kwargs["range"][2]
        phi_range = list(phi_range) + [phi_bins + 1]
        phi_bins = np.linspace(*phi_range)

    bin_schemas = binnings.calculate_bins_nd(data, [radial_bins, theta_bins, phi_bins],
                                             *args, check_nan=not dropna, **kwargs)
    weights = kwargs.pop("weights", None)
    frequencies, errors2, missed = histogram_nd.calculate_frequencies(
        data, ndim=3, binnings=bin_schemas, weights=weights)
    return SphericalHistogram(binnings=bin_schemas, frequencies=frequencies,
                              errors2=errors2, missed=missed)
def feed_ssldata(self, data):
    """Feed SSL record level data into the pipe.

    The data must be a bytes instance. It is OK to send an empty bytes
    instance. This can be used to get ssldata for a handshake initiated by
    this endpoint.

    Return a (ssldata, appdata) tuple. The ssldata element is a list of
    buffers containing SSL data that needs to be sent to the remote SSL.

    The appdata element is a list of buffers containing plaintext data that
    needs to be forwarded to the application. The appdata list may contain
    an empty buffer indicating an SSL "close_notify" alert. This alert must
    be acknowledged by calling :meth:`shutdown`.
    """
    if self._state == self.S_UNWRAPPED:
        # If unwrapped, pass plaintext data straight through.
        return ([], [data] if data else [])

    ssldata = []
    appdata = []
    self._need_ssldata = False
    if data:
        self._incoming.write(data)
    # Note: the state checks below are deliberately `if`, not `elif`, so a
    # completed handshake falls through into the wrapped read loop in the
    # same call.
    try:
        if self._state == self.S_DO_HANDSHAKE:
            # Call do_handshake() until it doesn't raise anymore.
            self._sslobj.do_handshake()
            self._state = self.S_WRAPPED
            if self._handshake_cb:
                self._handshake_cb()
        if self._state == self.S_WRAPPED:
            # Main state: read data from SSL until close_notify.
            while True:
                chunk = self._sslobj.read(self.bufsize)
                appdata.append(chunk)
                if not chunk:  # close_notify
                    break
        if self._state == self.S_SHUTDOWN:
            # Call shutdown() until it doesn't raise anymore.
            self._sslobj.unwrap()
            self._sslobj = None
            self._state = self.S_UNWRAPPED
            if self._shutdown_cb:
                self._shutdown_cb()
        if self._state == self.S_UNWRAPPED:
            # Drain possible plaintext data after close_notify.
            appdata.append(self._incoming.read())
    except (ssl.SSLError, sslcompat.CertificateError) as e:
        # WANT_READ / WANT_WRITE / SYSCALL are normal "try again later"
        # conditions; anything else is a real failure and is re-raised
        # (notifying the handshake callback first if still handshaking).
        if getattr(e, 'errno', None) not in (ssl.SSL_ERROR_WANT_READ,
                                             ssl.SSL_ERROR_WANT_WRITE,
                                             ssl.SSL_ERROR_SYSCALL):
            if self._state == self.S_DO_HANDSHAKE and self._handshake_cb:
                self._handshake_cb(e)
            raise
        self._need_ssldata = e.errno == ssl.SSL_ERROR_WANT_READ
    # Check for record level data that needs to be sent back.
    # Happens for the initial handshake and renegotiations.
    if self._outgoing.pending:
        ssldata.append(self._outgoing.read())
    return (ssldata, appdata)
def _register_endpoints ( self , providers ) :
"""Register methods to endpoints
: type providers : list [ str ]
: rtype : list [ ( str , ( ( satosa . context . Context , Any ) - > satosa . response . Response , Any ) ) ]
: param providers : A list of backend providers
: return : A list of endpoint / method pairs""" | url_map = [ ]
for endp_category in self . endpoints :
for binding , endp in self . endpoints [ endp_category ] . items ( ) :
valid_providers = ""
for provider in providers :
valid_providers = "{}|^{}" . format ( valid_providers , provider )
valid_providers = valid_providers . lstrip ( "|" )
parsed_endp = urlparse ( endp )
url_map . append ( ( "(%s)/%s$" % ( valid_providers , parsed_endp . path ) , functools . partial ( self . handle_authn_request , binding_in = binding ) ) )
if self . expose_entityid_endpoint ( ) :
parsed_entity_id = urlparse ( self . idp . config . entityid )
url_map . append ( ( "^{0}" . format ( parsed_entity_id . path [ 1 : ] ) , self . _metadata_endpoint ) )
return url_map |
def _apply_scsi_controller(adapter, adapter_type, bus_sharing, key, bus_number, operation):
    '''Returns a vim.vm.device.VirtualDeviceSpec object specifying to
    add/edit a SCSI controller.

    adapter
        SCSI controller adapter name
    adapter_type
        SCSI controller adapter type eg. paravirtual
    bus_sharing
        SCSI controller bus sharing eg. virtual_sharing
    key
        SCSI controller unique key
    bus_number
        Device bus number property
    operation
        Describes the operation which should be done on the object,
        the possible values: 'add' and 'edit', the default value is 'add'

    .. code-block:: bash

        scsi:
          adapter: 'SCSI controller 0'
          type: paravirtual or lsilogic or lsilogic_sas
          bus_sharing: 'no_sharing' or 'virtual_sharing' or 'physical_sharing'
    '''
    log.trace('Configuring scsi controller adapter=%s adapter_type=%s '
              'bus_sharing=%s key=%s bus_number=%s',
              adapter, adapter_type, bus_sharing, key, bus_number)
    scsi_spec = vim.vm.device.VirtualDeviceSpec()
    # Pick the concrete controller class matching the requested adapter type.
    # NOTE(review): an unrecognized adapter_type leaves `summary` unbound and
    # raises NameError at the deviceInfo assignment below — confirm callers
    # validate adapter_type upstream.
    if adapter_type == 'lsilogic':
        summary = 'LSI Logic'
        scsi_spec.device = vim.vm.device.VirtualLsiLogicController()
    elif adapter_type == 'lsilogic_sas':
        summary = 'LSI Logic Sas'
        scsi_spec.device = vim.vm.device.VirtualLsiLogicSASController()
    elif adapter_type == 'paravirtual':
        summary = 'VMware paravirtual SCSI'
        scsi_spec.device = vim.vm.device.ParaVirtualSCSIController()
    elif adapter_type == 'buslogic':
        summary = 'Bus Logic'
        scsi_spec.device = vim.vm.device.VirtualBusLogicController()
    if operation == 'add':
        scsi_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
    elif operation == 'edit':
        scsi_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
    scsi_spec.device.key = key
    scsi_spec.device.busNumber = bus_number
    scsi_spec.device.deviceInfo = vim.Description()
    scsi_spec.device.deviceInfo.label = adapter
    scsi_spec.device.deviceInfo.summary = summary
    if bus_sharing == 'virtual_sharing':
        # Virtual disks can be shared between virtual machines on
        # the same server
        scsi_spec.device.sharedBus = vim.vm.device.VirtualSCSIController.Sharing.virtualSharing
    elif bus_sharing == 'physical_sharing':
        # Virtual disks can be shared between virtual machines on any server
        scsi_spec.device.sharedBus = vim.vm.device.VirtualSCSIController.Sharing.physicalSharing
    elif bus_sharing == 'no_sharing':
        # Virtual disks cannot be shared between virtual machines
        scsi_spec.device.sharedBus = vim.vm.device.VirtualSCSIController.Sharing.noSharing
    return scsi_spec
def read_config_file(filename):
    """Reads the configuration file.

    :param filename: the name of the file containing the configuration.
    :type filename: str

    :returns: A tuple where the first element is a list of sections, and the
              second element is a map containing the configuration (options
              and values).

    The structure of the configuration file is important. Here is an example
    of a configuration file::

        [1]  # Computes statistics on duplicated samples
        script = duplicated_samples
        [2]  # Removes samples according to missingness
        script = sample_missingness
        [3]  # Removes markers according to missingness
        script = snp_missingness
        [4]  # Removes samples according to missingness (98%)
        script = sample_missingness
        mind = 0.02
        [5]  # Performs a sex check
        script = sex_check
        [6]  # Flags markers with MAF=0
        script = flag_maf_zero
        [7]  # Flags markers according to Hardy Weinberg
        script = flag_hw
        [8]  # Subset the dataset (excludes markers and remove samples)
        script = subset
        exclude = .../filename
        remove = .../filename

    Sections are in square brackets and must be ``integer``. The section
    number represents the step at which the script will be run (*i.e.* from
    the smallest number to the biggest). The sections must be continuous.

    Each section contains the script name (``script`` variable) and options
    of the script (all other variables) (*e.g.* section 4 runs the
    ``sample_missingness`` script (:py:func:`run_sample_missingness`) with
    option ``mind`` set to 0.02).

    Here is a list of the available scripts: ``duplicated_samples``,
    ``duplicated_snps``, ``noCall_hetero_snps``, ``sample_missingness``,
    ``snp_missingness``, ``sex_check``, ``plate_bias``, ``contamination``,
    ``remove_heterozygous_haploid``, ``find_related_samples``,
    ``check_ethnicity``, ``flag_maf_zero``, ``flag_hw``, ``subset`` and
    ``compare_gold_standard`` (each has a matching ``run_*`` function).
    """
    # Creating the config parser (case-sensitive option names, flag-style
    # options without values allowed).
    config = ConfigParser.RawConfigParser(allow_no_value=True)
    config.optionxform = str
    config.read(filename)

    # Checking the section names: they must all be integers.
    sections = None
    try:
        sections = sorted([int(i) for i in config.sections()])
    except ValueError:
        # Section not integer
        msg = ("{}: sections must be integers: "
               "{}".format(filename, config.sections()))
        raise ProgramError(msg)
    if not sections:
        # Guard: min()/max() below would raise ValueError on an empty file.
        msg = "{}: no section found".format(filename)
        raise ProgramError(msg)
    # BUG FIX: on Python 3 a list never compares equal to a range object,
    # so the original check raised for every (valid) file. Compare against
    # an explicit list instead.
    if sections != list(range(min(sections), max(sections) + 1)):
        # Missing a section
        msg = "{}: maybe a section is missing: {}".format(filename, sections)
        raise ProgramError(msg)
    sections = [str(i) for i in sections]

    # Reading the configuration for each section
    configuration = {}
    for section in sections:
        # Getting the script variable (and check it)
        script_name = None
        try:
            script_name = config.get(section, "script")
        except ConfigParser.NoOptionError:
            msg = ("{}: section {}: no variable called "
                   "'script'".format(filename, section))
            raise ProgramError(msg)
        if script_name not in available_modules:
            msg = ("{}: section {}: script {}: invalid script "
                   "name".format(filename, section, script_name))
            raise ProgramError(msg)
        # Options reserved for the pipeline itself may not be set by the
        # user; the set depends on the script (invariant per section, so
        # build it once outside the variable loop).
        unwanted_options = {"bfile", "tfile", "file", "out"}
        if script_name in {"sample_missingness", "subset"}:
            unwanted_options |= {"ifile", "is-bfile", "is-tfile"}
        if script_name == "subset":
            unwanted_options.add("is-file")
        # Getting the variables
        options = []
        for variable_name, variable_value in config.items(section):
            if variable_name in unwanted_options:
                msg = ("{}: section {}: do not use {} as an option for "
                       "{}".format(filename, section, variable_name,
                                   script_name))
                raise ProgramError(msg)
            if variable_name != "script":
                options.append("--" + variable_name)
                if variable_value is not None:
                    if variable_name in {"indep-pairwise", "sge-nodes",
                                         "ibs-sge-nodes"}:
                        # These special options take multiple
                        # space-separated values.
                        options.extend(variable_value.split(" "))
                    else:
                        options.append(variable_value)
        # Saving the configuration
        configuration[section] = (script_name, options)
    return sections, configuration
def get_template(template_name, using=None):
    """Loads and returns a template for the given name.

    :param template_name: name of the template to load.
    :param using: optional engine alias restricting which engines are tried.
    :raises TemplateDoesNotExist: if no engine can load the template.
    """
    engines = _engine_list(using)
    for engine in engines:
        try:
            return engine.get_template(template_name)
        except TemplateDoesNotExist:
            # Unused `as e` binding removed: try the next engine, and raise
            # a fresh error only when every engine has failed.
            pass
    raise TemplateDoesNotExist(template_name)
def ts_to_df(metadata):
    """Create a data frame from one TimeSeries object.

    :param dict metadata: Time Series dictionary
    :return dict: One data frame per table, organized in a dictionary by name
    """
    logger_dataframes.info("enter ts_to_df")
    dfs = {}
    # Plot the variable + values vs year, age, depth (whichever are available)
    dfs["paleoData"] = pd.DataFrame(_plot_ts_cols(metadata))
    # Plot the chronology variables + values in a data frame
    dfs["chronData"] = _get_key_data(metadata, "chronData_df")
    # Take out the chronData pandas data frame object if it exists in the
    # metadata. Otherwise, the data frame renderer gets crazy and errors out.
    # NOTE(review): this mutates the caller's `metadata` dict in place.
    if "chronData_df" in metadata:
        del metadata["chronData_df"]
    # Sort keys so the metadata table renders in a deterministic order.
    s = collections.OrderedDict(sorted(metadata.items()))
    # Put key-vars in a data frame to make it easier to visualize
    dfs["metadata"] = pd.DataFrame(list(s.items()), columns=['Key', 'Value'])
    logger_dataframes.info("exit ts_to_df")
    return dfs
def find(cls, searched_dir, pattern):
    """Return the sorted list of files under *searched_dir* matching *pattern*.

    Symbolic links are not included in the result.
    """
    Log.debug('find {0} with pattern: {1}'.format(searched_dir, pattern))
    hits = []
    for root_dir, dir_names, file_names in os.walk(searched_dir, followlinks=False):
        hits.extend(
            os.path.join(root_dir, name)
            for name in file_names
            if fnmatch.fnmatch(name, pattern)
            and not os.path.islink(os.path.join(root_dir, name))
        )
    return sorted(hits)
def WriteClientCrashInfo(self, client_id, crash_info, cursor=None):
    """Writes a new client crash record.

    Inserts a row into client_crash_history and updates the client's
    last_crash_timestamp in the same statement batch; both use the shared
    @now variable so the two tables record the identical timestamp.

    :param client_id: client id string (converted to int for storage).
    :param crash_info: protobuf message; stored serialized.
    :param cursor: MySQL cursor (injected by the connection decorator).
    :raises db.UnknownClientError: if no `clients` row exists for client_id.
    """
    query = """
    SET @now = NOW(6);
    INSERT INTO client_crash_history (client_id, timestamp, crash_info)
    VALUES (%(client_id)s, @now, %(crash_info)s);
    UPDATE clients
    SET last_crash_timestamp = @now
    WHERE client_id = %(client_id)s
    """
    params = {
        "client_id": db_utils.ClientIDToInt(client_id),
        "crash_info": crash_info.SerializeToString(),
    }
    try:
        cursor.execute(query, params)
    except MySQLdb.IntegrityError as e:
        # FK violation on the history insert means the client is unknown.
        raise db.UnknownClientError(client_id, cause=e)
def approve(self, request, *args, **kwargs):
    """Approves the considered post and redirects the user to the success URL."""
    self.object = self.get_object()
    # Resolve the redirect target before mutating the object.
    success_url = self.get_success_url()
    self.object.approved = True
    self.object.save()
    # Flash a confirmation message for the next rendered page.
    messages.success(self.request, self.success_message)
    return HttpResponseRedirect(success_url)
def _represent_match_traversal(match_traversal):
    """Emit MATCH query code for an entire MATCH traversal sequence."""
    # The first step uses a distinct representation; the remaining steps are
    # rendered uniformly and concatenated after it.
    head = _first_step_to_match(match_traversal[0])
    tail = (_subsequent_step_to_match(step) for step in match_traversal[1:])
    return head + u''.join(tail)
def read_geojson(filename):
    """Reads a geojson file containing an STObject and initializes a new
    STObject from the information in the file.

    Args:
        filename: Name of the geojson file
    Returns:
        an STObject
    """
    # Use a context manager so the file handle is released even if
    # json.load raises (the original leaked the handle on error).
    with open(filename) as json_file:
        data = json.load(json_file)
    times = data["properties"]["times"]
    main_data = dict(timesteps=[], masks=[], x=[], y=[], i=[], j=[])
    attribute_data = dict()
    for feature in data["features"]:
        # Collect the per-timestep core arrays...
        for main_name in main_data.keys():
            main_data[main_name].append(np.array(feature["properties"][main_name]))
        # ...and any per-timestep attribute arrays.
        for k, v in feature["properties"]["attributes"].items():
            if k not in attribute_data:
                attribute_data[k] = [np.array(v)]
            else:
                attribute_data[k].append(np.array(v))
    # Optional constructor keywords, present only if stored in the file.
    kwargs = {}
    for kw in ["dx", "step", "u", "v"]:
        if kw in data["properties"]:
            kwargs[kw] = data["properties"][kw]
    sto = STObject(main_data["timesteps"], main_data["masks"],
                   main_data["x"], main_data["y"],
                   main_data["i"], main_data["j"],
                   times[0], times[-1], **kwargs)
    for k, v in attribute_data.items():
        sto.attributes[k] = v
    return sto
def _get_features ( self ) :
"""Decide which layers to render based on current zoom level and view type .""" | if self . _satellite :
return [ ( "water" , [ ] , [ ] ) ]
elif self . _zoom <= 2 :
return [ ( "water" , [ ] , [ ] ) , ( "marine_label" , [ ] , [ 1 ] ) , ]
elif self . _zoom <= 7 :
return [ ( "admin" , [ ] , [ ] ) , ( "water" , [ ] , [ ] ) , ( "road" , [ "motorway" ] , [ ] ) , ( "country_label" , [ ] , [ ] ) , ( "marine_label" , [ ] , [ 1 ] ) , ( "state_label" , [ ] , [ ] ) , ( "place_label" , [ ] , [ "city" , "town" ] ) , ]
elif self . _zoom <= 10 :
return [ ( "admin" , [ ] , [ ] ) , ( "water" , [ ] , [ ] ) , ( "road" , [ "motorway" , "motorway_link" , "trunk" ] , [ ] ) , ( "country_label" , [ ] , [ ] ) , ( "marine_label" , [ ] , [ 1 ] ) , ( "state_label" , [ ] , [ ] ) , ( "place_label" , [ ] , [ "city" , "town" ] ) , ]
else :
return [ ( "landuse" , [ "agriculture" , "grass" , "park" ] , [ ] ) , ( "water" , [ ] , [ ] ) , ( "waterway" , [ "river" , "canal" ] , [ ] ) , ( "building" , [ ] , [ ] ) , ( "road" , [ "motorway" , "motorway_link" , "trunk" , "primary" , "secondary" ] if self . _zoom <= 14 else [ "motorway" , "motorway_link" , "trunk" , "primary" , "secondary" , "tertiary" , "link" , "street" , "tunnel" ] , [ ] ) , ( "poi_label" , [ ] , [ ] ) , ] |
def mark_failed(self, clsname, msg):
    '''Marks the tracer as failed with the given exception class name
    :code:`clsname` and message :code:`msg`.

    May only be called in the started state and only if the tracer is not
    already marked as failed. Note that this does not end the tracer! Once a
    tracer is marked as failed, attempts to do it again are forbidden.

    If possible, using the tracer as a context manager (i.e., with a
    :code:`with`-block) or :meth:`.mark_failed_exc` is more convenient than
    this method.

    :param str clsname: Fully qualified name of the exception type that
        caused the failure.
    :param str msg: Exception message that caused the failure.
    '''
    # Thin wrapper: delegates the error report to the native SDK.
    self.nsdk.tracer_error(self.handle, clsname, msg)
def mach2tas(M, h):
    """Convert Mach number to true airspeed (TAS) at altitude *h*.

    TAS = M * a, where a is the local speed of sound at altitude h.
    """
    speed_of_sound = vsound(h)
    return M * speed_of_sound
def plot_pole(map_axis, plon, plat, A95, label='', color='k', edgecolor='k',
              marker='o', markersize=20, legend='no'):
    """This function plots a paleomagnetic pole and A95 error ellipse on a
    cartopy map axis.

    Before this function is called, a plot needs to be initialized with code
    such as that in the make_orthographic_map function.

    Example
    -------
    >>> plon = 200
    >>> plat = 60
    >>> A95 = 6
    >>> map_axis = ipmag.make_orthographic_map(central_longitude=200, central_latitude=30)
    >>> ipmag.plot_pole(map_axis, plon, plat, A95, color='red', markersize=40)

    Required Parameters
    -------------------
    map_axis : the name of the current map axis that has been developed using cartopy
    plon : the longitude of the paleomagnetic pole being plotted (in degrees E)
    plat : the latitude of the paleomagnetic pole being plotted (in degrees)
    A95 : the A_95 confidence ellipse of the paleomagnetic pole (in degrees)

    Optional Parameters (defaults are used if not specified)
    --------------------------------------------------------
    label : the default is no label. Labels can be assigned.
    color : the default color is black. Other colors can be chosen (e.g. 'r')
    edgecolor : color of the marker edge (default is black)
    marker : the default is a circle. Other symbols can be chosen (e.g. 's')
    markersize : the default is 20. Other sizes can be chosen.
    legend : the default is no legend ('no'). Putting 'yes' will plot a legend.
    """
    if not has_cartopy:
        print('-W- cartopy must be installed to run ipmag.plot_pole')
        return
    # Convert the A95 radius from degrees of arc to kilometres
    # (~111.32 km per degree at the Earth's surface).
    A95_km = A95 * 111.32
    # zorder=101 keeps the pole marker above the map layers.
    map_axis.scatter(plon, plat, marker=marker, color=color,
                     edgecolors=edgecolor, s=markersize, label=label,
                     zorder=101, transform=ccrs.Geodetic())
    # Draw the A95 confidence ellipse as an equidistant circle about the pole.
    equi(map_axis, plon, plat, A95_km, color)
    if legend == 'yes':
        plt.legend(loc=2)
def last_entry(self, data_revisar=None, key_revisar=None):
    """Return the timestamp of the last value in the selected data store,
    together with the total number of entries (rows) of that data set.

    :param data_revisar: (optional) a specific dataframe container to inspect
        (defaults to ``self.data``)
    :param key_revisar: (optional) key to look up; normally 'dem'
        (defaults to ``self.masterkey``)
    :return: tmax, num_entries
    """
    key_revisar = key_revisar or self.masterkey
    data_revisar = self.data if data_revisar is None else data_revisar
    # return tmax, num_entradas
    # NOTE(review): membership is tested against .keys() — presumably
    # data_revisar is a dict-like/HDF store; confirm before simplifying.
    if key_revisar in data_revisar.keys():
        data_rev = data_revisar[key_revisar]
        return data_rev.index[-1].to_pydatetime(), len(data_rev)
    else:
        # No data yet: fall back to the configured start-of-history epoch
        # with an entry count of zero.
        return pd.Timestamp(dt.datetime.strptime(self.DATE_INI, self.DATE_FMT),
                            tz=self.TZ).to_pydatetime(), 0
def require_settings(self, args, options):
    """Load the ZAPPA_SETTINGS as we expect it.

    Validates that an environment name was supplied and that the Django
    settings module defines ZAPPA_SETTINGS with an entry for that
    environment, then caches the derived deployment attributes on self.

    :param args: positional command arguments (unused here).
    :param options: command options; must contain 'environment'.
    :raises ImproperlyConfigured: when the environment or settings are missing.
    """
    # BUG FIX: dict.has_key() was removed in Python 3 -- use `in` instead.
    if 'environment' not in options:
        print("You must call deploy with an environment name. \n python manage.py deploy <environment>")
        raise ImproperlyConfigured
    from django.conf import settings
    if 'ZAPPA_SETTINGS' not in dir(settings):
        print("Please define your ZAPPA_SETTINGS in your settings file before deploying.")
        raise ImproperlyConfigured
    self.zappa_settings = settings.ZAPPA_SETTINGS
    # Set your configuration
    if isinstance(options['environment'], list):
        self.api_stage = options['environment'][0]
    else:
        self.api_stage = options['environment']
    # Check the stage exists *before* indexing into it (the original
    # indexed first, raising a bare KeyError instead of the friendly
    # message below).
    if self.api_stage not in self.zappa_settings:
        print("Please make sure that the environment '" + self.api_stage + "' is defined in your ZAPPA_SETTINGS in your settings file before deploying.")
        raise ImproperlyConfigured
    if self.zappa_settings[self.api_stage].get('project_name'):
        self.project_name = self.zappa_settings[self.api_stage]['project_name']
    else:
        # Default project name: last path component of the Django BASE_DIR.
        self.project_name = os.path.abspath(settings.BASE_DIR).split(os.sep)[-1]
    # Lambda function names may not contain underscores.
    self.lambda_name = slugify(self.project_name + '-' + self.api_stage).replace("_", "-")
    # Load environment-specific settings
    self.s3_bucket_name = self.zappa_settings[self.api_stage]['s3_bucket']
    self.vpc_config = self.zappa_settings[self.api_stage].get('vpc_config', {})
    self.memory_size = self.zappa_settings[self.api_stage].get('memory_size', 512)
    self.timeout = self.zappa_settings[self.api_stage].get('timeout', 30)
    custom_settings = ['http_methods', 'parameter_depth',
                       'integration_response_codes', 'method_response_codes',
                       'role_name', 'aws_region']
    for setting in custom_settings:
        if setting in self.zappa_settings[self.api_stage]:
            setattr(self.zappa, setting, self.zappa_settings[self.api_stage][setting])
def leb128_decode(data):
    """Decodes a LEB128-encoded unsigned integer.

    :param BufferedIOBase data: The buffer containing the LEB128-encoded
        integer to decode.
    :return: The decoded integer.
    :rtype: int
    """
    value = 0
    shift = 0
    while True:
        chunk = data.read(1)
        if not chunk:
            # Ran out of bytes before the final (high-bit-clear) byte.
            raise bitcoin.core.SerializationTruncationError('Invalid LEB128 integer')
        byte = ord(chunk)
        # Low 7 bits carry the payload, least-significant group first.
        value |= (byte & 0x7f) << shift
        if not (byte & 0x80):
            # High bit clear: this was the last byte.
            return value
        shift += 7
def createTargetOrder(self, quantity, parentId=0, target=0., orderType=None,
                      transmit=True, group=None, tif="DAY", rth=False,
                      account=None):
    """Creates a TARGET (profit-taking) order attached to *parentId*.

    :param quantity: order quantity.
    :param parentId: id of the parent order this target belongs to.
    :param target: target (limit) price.
    :param orderType: explicit order type; defaults to a limit order.
    :param group: OCA group name, if any.
    :return: the order object built by ``self.createOrder``.
    """
    # `is None` instead of `== None`: identity check per PEP 8.
    order = self.createOrder(
        quantity,
        price=target,
        transmit=transmit,
        orderType=dataTypes["ORDER_TYPE_LIMIT"] if orderType is None else orderType,
        ocaGroup=group,
        parentId=parentId,
        rth=rth,
        tif=tif,
        account=account,
    )
    return order
def set_stream(self, stream_id):
    """Set group stream.

    Generator-based coroutine (old-style ``yield from``): updates the cached
    group state, asks the server to switch this group to *stream_id*, then
    logs the change.
    """
    self._group['stream_id'] = stream_id
    yield from self._server.group_stream(self.identifier, stream_id)
    _LOGGER.info('set stream to %s on %s', stream_id, self.friendly_name)
def generate_page_toc(soup):
    """Return page-level (~list of headings) TOC template data for soup."""
    # Maybe we don't want to show all the headings. E.g., it's common for a
    # page to have just one H1, a title at the top. Heuristic: a level with
    # only one anchored heading is skipped; at most four levels are shown.
    level_counts = collections.defaultdict(int)
    for tag in soup.find_all(_heading_re):
        if tag.get('id') or tag.get('name'):
            level_counts[hdepth(tag)] += 1
    shown_levels = [level for level in range(100) if level_counts[level] > 1][:4]

    toc = []
    for tag in soup.find_all(_heading_re):
        level = hdepth(tag)
        if level not in shown_levels:
            continue
        toc.append({
            'depth': shown_levels.index(level) + 1,
            'link': tag.get('id') or tag.get('name'),
            'text': tag.text,
        })
    return toc
def catalog_to_cells(catalog, radius, order, include_fallback=True, **kwargs):
    """Convert a catalog to a set of cells.

    This function is intended to be used via `catalog_to_moc` but
    is available for separate usage. It takes the same arguments
    as that function.

    This function uses the Healpy `query_disc` function to get a list
    of cells for each item in the catalog in turn. Additional keyword
    arguments, if specified, are passed to `query_disc`. This can include,
    for example, `inclusive` (set to `True` to include cells overlapping
    the radius as well as those with centers within it) and `fact`
    (to control sampling when `inclusive` is specified).

    If cells at the given order are bigger than the given radius, then
    `query_disc` may find none inside the radius. In this case,
    if `include_fallback` is `True` (the default), the cell at each
    position is included.

    If the given radius is zero (or smaller) then Healpy `query_disc`
    is not used -- instead the fallback position is used automatically.
    """
    nside = 2 ** order
    # Ensure catalog is in ICRS coordinates.
    catalog = catalog.icrs
    # Ensure radius is in radians; a bare number is converted from arcseconds.
    if isinstance(radius, Quantity):
        radius = radius.to(radian).value
    else:
        radius = radius * pi / (180.0 * 3600.0)
    # Convert coordinates to position vectors (Healpy convention:
    # theta = colatitude, phi = longitude).
    phi = catalog.ra.radian
    theta = (pi / 2) - catalog.dec.radian
    vectors = ang2vec(theta, phi)
    # Ensure we can iterate over vectors (it might be a single position).
    if catalog.isscalar:
        vectors = [vectors]
    # Query for a list of cells for each catalog position.
    cells = set()
    for vector in vectors:
        if radius > 0.0:
            # Try "disc" query.
            vector_cells = query_disc(nside, vector, radius, nest=True, **kwargs)
            if vector_cells.size > 0:
                cells.update(vector_cells.tolist())
                continue
            elif not include_fallback:
                continue
        # The query didn't find anything (or the radius was not positive) --
        # include the cell at the given position at least.
        cell = vec2pix(nside, vector[0], vector[1], vector[2], nest=True)
        cells.add(cell.item())
    return cells
def batch(args):
    """%prog batch all.cds *.anchors

    Compute Ks values for a set of anchors files. This will generate a bunch
    of work directories for each comparison. The anchors file should be in
    the form of species1.species2.anchors.
    """
    from jcvi.apps.grid import MakeManager
    p = OptionParser(batch.__doc__)
    opts, args = p.parse_args(args)
    if len(args) < 2:
        sys.exit(not p.print_help())
    cdsfile = args[0]
    anchors = args[1:]
    # One work directory per anchors file, named from the first two dotted
    # components of the file name (i.e. "species1.species2").
    workdirs = [".".join(op.basename(x).split(".")[:2]) for x in anchors]
    for wd in workdirs:
        mkdir(wd)
    mm = MakeManager()
    for wd, ac in zip(workdirs, anchors):
        # Stage 1: extract the pairwise CDS sequences for this comparison.
        pairscdsfile = wd + ".cds.fasta"
        cmd = "python -m jcvi.apps.ks prepare {} {} -o {}".format(ac, cdsfile, pairscdsfile)
        mm.add((ac, cdsfile), pairscdsfile, cmd)
        # Stage 2: compute Ks from the prepared sequences.
        ksfile = wd + ".ks"
        cmd = "python -m jcvi.apps.ks calc {} -o {} --workdir {}".format(pairscdsfile, ksfile, wd)
        mm.add(pairscdsfile, ksfile, cmd)
    mm.write()
def detect_FASST(dat_orig, s_freq, time, opts, submethod='rms'):
    """Spindle detection based on FASST method, itself based on Moelle et al.
    (2002).

    Parameters
    ----------
    dat_orig : ndarray (dtype='float')
        vector with the data for one channel
    s_freq : float
        sampling frequency
    time : ndarray (dtype='float')
        vector with the time points for each sample
    opts : instance of 'DetectSpindle'
        'det_butter' : dict
            parameters for the Butterworth detection filter
        'moving_rms' : dict
            parameters for 'moving_rms'
        'smooth' : dict
            parameters for 'smooth'
        'det_thresh' : float
            detection threshold (used as a percentile here)
        'duration' : tuple of float
            min and max duration of spindles
    submethod : str
        'abs' (rectified) or 'rms' (root-mean-square)

    Returns
    -------
    list of dict
        list of detected spindles
    dict
        'det_value_lo' with detection value, 'sel_value' with nan
    float
        spindle density, per 30-s epoch

    References
    ----------
    Leclercq, Y. et al. Compu. Intel. and Neurosci. (2011).
    """
    # Band-pass the raw signal, then fix the detection threshold as a
    # percentile of the filtered signal.
    dat_det = transform_signal(dat_orig, s_freq, 'butter', opts.det_butter)
    det_value = percentile(dat_det, opts.det_thresh)
    # Envelope extraction: rectification or moving RMS, then smoothing.
    if submethod == 'abs':
        dat_det = transform_signal(dat_det, s_freq, 'abs')
    elif submethod == 'rms':
        dat_det = transform_signal(dat_det, s_freq, 'moving_rms', opts.moving_rms)
    dat_det = transform_signal(dat_det, s_freq, 'smooth', opts.smooth)
    events = detect_events(dat_det, 'above_thresh', det_value)
    if events is not None:
        # Merge nearby events, filter by duration, re-merge, and drop events
        # straddling the recording edges.
        events = _merge_close(dat_det, events, time, opts.tolerance)
        events = within_duration(events, time, opts.duration)
        events = _merge_close(dat_det, events, time, opts.min_interval)
        events = remove_straddlers(events, time, s_freq)
        power_peaks = peak_in_power(events, dat_orig, s_freq, opts.power_peaks)
        powers = power_in_band(events, dat_orig, s_freq, opts.frequency)
        sp_in_chan = make_spindles(events, power_peaks, powers, dat_det,
                                   dat_orig, time, s_freq)
    else:
        lg.info('No spindle found')
        sp_in_chan = []
    values = {'det_value_lo': det_value, 'sel_value': nan}
    # Spindles per 30-s epoch: count / (n_samples / s_freq) * 30.
    density = len(sp_in_chan) * s_freq * 30 / len(dat_orig)
    return sp_in_chan, values, density
def destroy_s3(app='', env='dev', **_):
    """Destroy S3 Resources for _app_ in _env_.

    Args:
        app (str): Application name
        env (str): Deployment environment/account name
    Returns:
        boolean: True if destroyed successfully
    """
    # The AWS profile name matches the deployment environment.
    session = boto3.Session(profile_name=env)
    client = session.resource('s3')
    generated = get_details(app=app, env=env)
    archaius = generated.archaius()
    bucket = client.Bucket(archaius['bucket'])
    # Delete every object under the application's archaius path prefix.
    for item in bucket.objects.filter(Prefix=archaius['path']):
        item.Object().delete()
        LOG.info('Deleted: %s/%s', item.bucket_name, item.key)
    return True
def _reduce_and_smooth ( obs_tidy , goal_size ) :
"""Uses interpolation to reduce the number of observations ( cells ) .
This is useful for plotting functions that otherwise will ignore
most of the cells ' values .
The reduction and smoothing is only done per column
Parameters
obs _ tidy : Pandas DataFrame . rows = obs ( eg . cells ) , cols = vars ( eg . genes )
goal _ size : number of cells to keep
Returns""" | if obs_tidy . shape [ 0 ] < goal_size :
return obs_tidy
else : # usually , a large number of cells can not be plotted , thus
# it is useful to reduce the number of cells plotted while
# smoothing the values . This should be similar to an interpolation
# but done per row and not for the entire image .
from scipy . interpolate import UnivariateSpline
x = range ( obs_tidy . shape [ 0 ] )
# maximum number of cells to keep
new_x = np . linspace ( 0 , len ( x ) , num = goal_size , endpoint = False )
new_df = obs_tidy . iloc [ new_x , : ] . copy ( )
for index , col in obs_tidy . iteritems ( ) :
spl = UnivariateSpline ( x , col . values , s = 20 )
new_df [ index ] = spl ( new_x )
return new_df . copy ( ) |
def max(self, default=None):
    """Calculate the maximum value over the time series.

    :param default: Value to return as a default should the calculation not be possible.
    :return: Float representing the maximum value or `None`.
    """
    # `numpy.asscalar` was removed in NumPy 1.23; `.item()` is the
    # supported way to turn a NumPy scalar into a plain Python float.
    return numpy.max(self.values).item() if self.values else default
def get_package_manager(self, target=None):
    """Return the package manager for ``target``, falling back to the
    global configuration when the target declares none."""
    selected = None
    if target:
        field = target.payload.get_field('package_manager')
        if field:
            selected = field.value
    return self.node_distribution.get_package_manager(package_manager=selected)
def _run(self, circuit: circuits.Circuit, param_resolver: study.ParamResolver, repetitions: int) -> Dict[str, np.ndarray]:
    """Run a simulation, mimicking quantum hardware.

    Args:
        circuit: The circuit to simulate.
        param_resolver: Parameters to run with the program.
        repetitions: Number of times to repeat the run.

    Returns:
        A dictionary from measurement gate key to measurement
        results. Measurement results are stored in a 2-dimensional
        numpy array, the first dimension corresponding to the repetition
        and the second to the actual boolean measurement results (ordered
        by the qubits being measured.)
    """
    # Abstract hook: concrete simulator subclasses must provide the
    # implementation.
    raise NotImplementedError()
def build_includes(cls, include_packages):
    """The default include strategy is to add a star (*) wild card after all
    sub-packages (but not the main package).

    This strategy is compatible with py2app and bbfreeze.

    Example (from SaltStack 2014.7):
        salt
        salt.fileserver.*
        salt.modules.*
        etc...

    :param include_packages: List of package references to recurse for subpackages
    :return: set of include specifications (package names, some with ``.*``)
    """
    includes, package_root_paths = cls._split_packages(include_packages)
    for package_path, package_name in six.iteritems(package_root_paths):
        if re.search(r'__init__.py.*$', package_path):
            # Looks like a package. Walk the directory and see if there are more.
            package_files = set([os.path.dirname(package_path)])
            for root, dirs, files in os.walk(os.path.dirname(package_path)):
                if '__init__.py' in files:
                    package_files.add(root)
            if len(package_files) > 1:
                # Derive dotted names for every sub-package relative to the
                # directory containing the root package.
                common_prefix = os.path.commonprefix(package_files)
                common_dir = os.path.dirname(common_prefix)
                package_tails = set([f[len(common_dir) + len(os.sep):] for f in package_files])
                package_names = set([tail.replace(os.sep, '.') for tail in package_tails])
                # The root package itself keeps its bare name; sub-packages
                # get the '.*' wildcard suffix.
                package_names_with_star = set([pkg + '.*' if pkg != package_name else pkg for pkg in package_names])
                includes |= package_names_with_star
            else:
                # No sub-packages. Just add the package name by itself.
                includes.add(package_name)
        else:
            # Not a package. Just add the module.
            includes.add(package_name)
    return includes
def unpack(cls, msg, client, server, request_id):
    """Parse message and return an `OpMsg`.

    Takes the client message as bytes, the client and server socket objects,
    and the client request id.
    """
    payload_document = OrderedDict()
    flags, = _UNPACK_UINT(msg[:4])
    pos = 4
    # NOTE(review): only 0 and 2 are accepted here — presumably 2 is the
    # only flag bit this mock supports; confirm against the OP_MSG spec.
    if flags != 0 and flags != 2:
        raise ValueError('OP_MSG flag must be 0 or 2 not %r' % (flags,))
    while pos < len(msg):
        payload_type, = _UNPACK_BYTE(msg[pos:pos + 1])
        pos += 1
        payload_size, = _UNPACK_INT(msg[pos:pos + 4])
        if payload_type == 0:
            # Payload type 0: a single BSON body document.
            doc = bson.decode_all(msg[pos:pos + payload_size], CODEC_OPTIONS)[0]
            payload_document.update(doc)
            pos += payload_size
        elif payload_type == 1:
            # Payload type 1: a document sequence with a C-string identifier.
            section_size, = _UNPACK_INT(msg[pos:pos + 4])
            pos += 4
            identifier, pos = _get_c_string(msg, pos)
            # Section starts w/ 4-byte size prefix, identifier ends w/ nil.
            documents_len = section_size - len(identifier) - 1 - 4
            documents = bson.decode_all(msg[pos:pos + documents_len], CODEC_OPTIONS)
            payload_document[identifier] = documents
            pos += documents_len
    database = payload_document['$db']
    return OpMsg(payload_document, namespace=database, flags=flags, _client=client, request_id=request_id, _server=server)
def copy(src, dst):
    """Copy a file or a directory tree.

    The destination base directory _must_ exist.

    :param src: A string containing the path of the source to copy. If the
        source ends with a '/', will become a recursive directory copy of source.
    :param dst: A string containing the path to the destination. If the
        destination ends with a '/', will copy into the target directory.
    :return: None
    """
    # Attempt the recursive directory copy first; fall back to a plain
    # file copy when the source turns out not to be a directory.
    try:
        shutil.copytree(src, dst)
    except OSError as exc:
        if exc.errno != errno.ENOTDIR:
            raise
        shutil.copy(src, dst)
def _split_token_to_subtokens ( token , subtoken_dict , max_subtoken_length ) :
"""Splits a token into subtokens defined in the subtoken dict .""" | ret = [ ]
start = 0
token_len = len ( token )
while start < token_len : # Find the longest subtoken , so iterate backwards .
for end in xrange ( min ( token_len , start + max_subtoken_length ) , start , - 1 ) :
subtoken = token [ start : end ]
if subtoken in subtoken_dict :
ret . append ( subtoken )
start = end
break
else : # Did not break
# If there is no possible encoding of the escaped token then one of the
# characters in the token is not in the alphabet . This should be
# impossible and would be indicative of a bug .
raise ValueError ( "Was unable to split token \"%s\" into subtokens." % token )
return ret |
def copy(self, src, dst, other_system=None):
    """Copy object of the same storage.

    Args:
        src (str): Path or URL.
        dst (str): Path or URL.
        other_system (pycosio.storage.azure._AzureBaseSystem subclass):
            The source storage system.
    """
    # Server-side copy: the source URL is formatted by whichever system
    # owns it (defaults to this one).
    with _handle_azure_exception():
        self.client.copy_file(copy_source=(other_system or self)._format_src_url(src, self), **self.get_client_kwargs(dst))
def set_options(pool_or_cursor, row_instance):
    "for connection-level options that need to be set on Row instances"
    # todo: move around an Options object instead
    option_names = ('JSON_READ',)
    for name in option_names:
        setattr(row_instance, name, getattr(pool_or_cursor, name, None))
    return row_instance
def get_question_content(self, number: str) -> str:
    """Fetch the content of a specific question (homework item) in the course."""
    try:
        # Request parameters identifying the homework item.
        params = {'hwId': number}
        # Fetch the page (short timeout; TLS verification disabled).
        response = self.__session.get(self.__url + '/showHomework', params=params, timeout=0.5, verify=False)
        soup = BeautifulSoup(response.text, 'html.parser')
        # Normalize the '\r' line endings in the question body text.
        result = ''
        content = soup.find('body').get_text().replace('繳交作業', '').strip()
        for line in content.split('\r'):
            result += line.strip() + '\n'
        # Return the cleaned question text.
        return result
    except requests.exceptions.Timeout:
        return "Timeout"
def destroy(vm_, call=None):
    '''Destroy a lxc container'''
    destroy_opt = __opts__.get('destroy', False)
    profiles = __opts__.get('profiles', {})
    profile = __opts__.get('profile', __opts__.get('internal_lxc_profile', []))
    path = None
    if profile and profile in profiles:
        path = profiles[profile].get('path', None)
    action = __opts__.get('action', '')
    # Refuse to destroy unless explicitly requested on the CLI.
    if action != 'destroy' and not destroy_opt:
        raise SaltCloudSystemExit('The destroy action must be called with -d, --destroy, ' '-a or --action.')
    if not get_configured_provider():
        return
    ret = {'comment': '{0} was not found'.format(vm_), 'result': False}
    # Only act when the container actually exists on the node.
    if _salt('lxc.info', vm_, path=path):
        __utils__['cloud.fire_event']('event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(vm_), args={'name': vm_, 'instance_id': vm_}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'])
        cret = _salt('lxc.destroy', vm_, stop=True, path=path)
        ret['result'] = cret['result']
        if ret['result']:
            ret['comment'] = '{0} was destroyed'.format(vm_)
            __utils__['cloud.fire_event']('event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(vm_), args={'name': vm_, 'instance_id': vm_}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'])
            # Keep the salt-cloud cache consistent with the deletion.
            if __opts__.get('update_cachedir', False) is True:
                __utils__['cloud.delete_minion_cachedir'](vm_, __active_provider_name__.split(':')[0], __opts__)
    return ret
def OnMove(self, event):
    """Main window move event handler."""
    # Persist the new window position in the config so it can be
    # restored on the next startup.
    position = self.main_window.GetScreenPositionTuple()
    config["window_position"] = repr(position)
def GetArtifactDependencies(rdf_artifact, recursive=False, depth=1):
    """Return a set of artifact dependencies.

    Args:
        rdf_artifact: RDF object artifact.
        recursive: If True recurse into dependencies to find their dependencies.
        depth: Used for limiting recursion depth.

    Returns:
        A set of strings containing the dependent artifact names.

    Raises:
        RuntimeError: If maximum recursion depth reached.
    """
    deps = set()
    for source in rdf_artifact.sources:
        # ARTIFACT is the legacy name for ARTIFACT_GROUP
        # per: https://github.com/ForensicArtifacts/artifacts/pull/143
        # TODO(user): remove legacy support after migration.
        if source.type in (rdf_artifacts.ArtifactSource.SourceType.ARTIFACT, rdf_artifacts.ArtifactSource.SourceType.ARTIFACT_GROUP):
            if source.attributes.GetItem("names"):
                deps.update(source.attributes.GetItem("names"))
    # Guard against dependency cycles between artifacts.
    if depth > 10:
        raise RuntimeError("Max artifact recursion depth reached.")
    deps_set = set(deps)
    if recursive:
        for dep in deps:
            artifact_obj = REGISTRY.GetArtifact(dep)
            new_dep = GetArtifactDependencies(artifact_obj, True, depth=depth + 1)
            if new_dep:
                deps_set.update(new_dep)
    return deps_set
async def reset_webhook(self, check=True) -> bool:
    """Reset (delete) the bot's webhook.

    :param check: when True, first query Telegram and skip deletion if no
        webhook URL is currently registered.
    :return: False when ``check`` found no webhook to delete; otherwise the
        result of ``delete_webhook``.
    """
    if check:
        wh = await self.bot.get_webhook_info()
        if not wh.url:
            return False
    return await self.bot.delete_webhook()
def credential_delete(self, *ids):
    """Delete one or more credentials.

    :param ids: one or more credential ids
    """
    payload = {"credentials": [{"id": str(cred_id)} for cred_id in ids]}
    return self.raw_query("credential", "delete", data=payload)
def to_jsonf(self, fpath: str, encoding: str = 'utf8', indent: int = None, ignore_none: bool = True, ignore_empty: bool = False) -> str:
    """Serialize this instance to a JSON file.

    :param fpath: Json file path
    :param encoding: Json file encoding
    :param indent: Number of indentation spaces
    :param ignore_none: Properties which are None are excluded if True
    :param ignore_empty: Properties which are empty are excluded if True
    :return: Json file path
    """
    serialized = traverse(self, ignore_none, force_value=True, ignore_empty=ignore_empty)
    return util.save_jsonf(serialized, fpath, encoding, indent)
def get_import_lines(self):
    """Convert the stored imports mapping into ``from X import Y`` lines."""
    if not self.imports:
        return []
    return ["from %s import %s" % (module, name) for name, module in self.imports.items()]
def create_chart(self, html_path='index.html', data_path='data.json', js_path='rickshaw.min.js', css_path='rickshaw.min.css', html_prefix=''):
    '''Save bearcart output to HTML and JSON.

    Parameters
    ----------
    html_path: string, default 'index.html'
        Path for html output
    data_path: string, default 'data.json'
        Path for data JSON output
    js_path: string, default 'rickshaw.min.js'
        If passed, the Rickshaw javascript library will be saved to the
        path. The file must be named "rickshaw.min.js"
    css_path: string, default 'rickshaw.min.css'
        If passed, the Rickshaw css library will be saved to the
        path. The file must be named "rickshaw.min.css"
    html_prefix: Prefix path to be appended to all the other paths for file
        creation, but not in the generated html file. This is needed if the
        html file does not live in the same folder as the running python
        script.

    Returns
    -------
    HTML, JSON, JS, and CSS

    Example
    -------
    >>> vis.create_chart(html_path='myvis.html', data_path='visdata.json',
    ...                  js_path='rickshaw.min.js',
    ...                  css_path='rickshaw.min.css')
    '''
    self.template_vars.update({'data_path': str(data_path), 'js_path': js_path, 'css_path': css_path, 'chart_id': self.chart_id, 'y_axis_id': self.y_axis_id, 'legend_id': self.legend_id, 'slider_id': self.slider_id})
    self._build_graph()
    html = self.env.get_template('bcart_template.html')
    self.HTML = html.render(self.template_vars)
    with open(os.path.join(html_prefix, html_path), 'w') as f:
        f.write(self.HTML)
    with open(os.path.join(html_prefix, data_path), 'w') as f:
        json.dump(self.json_data, f, sort_keys=True, indent=4, separators=(',', ': '))
    if js_path:
        # NOTE(review): `resource_string` returns bytes on Python 3; writing
        # it through a text-mode file may fail — confirm target Python version.
        js = resource_string('bearcart', 'rickshaw.min.js')
        with open(os.path.join(html_prefix, js_path), 'w') as f:
            f.write(js)
    if css_path:
        css = resource_string('bearcart', 'rickshaw.min.css')
        with open(os.path.join(html_prefix, css_path), 'w') as f:
            f.write(css)
def _set_attr(self, attr):
    """Given some text attribute, set the current cursor attributes
    appropriately."""
    # `text` and `colors` are module-level lookup tables; an attribute is
    # dispatched to whichever category it belongs to.
    if attr in text:
        self._text_attr(attr)
    elif attr in colors["foreground"]:
        self._color_attr("foreground", attr)
    elif attr in colors["background"]:
        self._color_attr("background", attr)
def leaders(self, current_page, **options):
    '''Retrieve a page of leaders from the leaderboard.

    @param current_page [int] Page to retrieve from the leaderboard.
    @param options [Hash] Options to be used when retrieving the page from the leaderboard.
    @return a page of leaders from the leaderboard.
    '''
    # Delegates to the named-leaderboard variant with this instance's name.
    return self.leaders_in(self.leaderboard_name, current_page, **options)
def gdal_translate(src, dst, options):
    """A simple wrapper for `gdal.Translate
    <https://gdal.org/python/osgeo.gdal-module.html#Translate>`_.

    Parameters
    ----------
    src: str, :osgeo:class:`ogr.DataSource` or :osgeo:class:`gdal.Dataset`
        the input data set
    dst: str
        the output data set
    options: dict
        additional parameters passed to gdal.Translate;
        see `gdal.TranslateOptions <http://gdal.org/python/osgeo.gdal-module.html#TranslateOptions>`_

    Returns
    -------
    None
    """
    out = gdal.Translate(dst, src, options=gdal.TranslateOptions(**options))
    # Dropping the reference makes the GDAL bindings flush and close the
    # output dataset.
    out = None
def _is_unpacked_egg(path):
    """Determine if the given path appears to be an unpacked egg."""
    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
    return _is_egg_path(path) and os.path.isfile(pkg_info)
def tds7_process_result(self):
    """Reads and processes COLMETADATA stream.

    This stream contains a list of returned columns.
    Stream format link: http://msdn.microsoft.com/en-us/library/dd357363.aspx
    """
    self.log_response_message('got COLMETADATA')
    r = self._reader
    # read number of columns and allocate the columns structure
    num_cols = r.get_smallint()
    # This can be a DUMMY results token from a cursor fetch
    if num_cols == -1:
        return
    # Reset per-resultset state before populating the new metadata.
    self.param_info = None
    self.has_status = False
    self.ret_status = None
    self.skipped_to_status = False
    self.rows_affected = tds_base.TDS_NO_COUNT
    self.more_rows = True
    self.row = [None] * num_cols
    self.res_info = info = _Results()
    # loop through the columns populating COLINFO struct from
    # server response
    header_tuple = []
    for col in range(num_cols):
        curcol = tds_base.Column()
        info.columns.append(curcol)
        self.get_type_info(curcol)
        curcol.column_name = r.read_ucs2(r.get_byte())
        precision = curcol.serializer.precision
        scale = curcol.serializer.scale
        size = curcol.serializer.size
        # DB-API style 7-tuple: (name, type, display_size, internal_size,
        # precision, scale, null_ok).
        header_tuple.append((curcol.column_name, curcol.serializer.get_typeid(), None, size, precision, scale, curcol.flags & tds_base.Column.fNullable))
    info.description = tuple(header_tuple)
    return info
def create_CTL(fname, tbl_name, col_list, TRUNC_OR_APPEND, delim=','):
    """Write an SQL*Loader control file.

    create_CTL(fname_control_file, tbl_name, src_file, cols, 'TRUNCATE')
    """
    # Assemble the whole control file, then write it in one pass.
    lines = [
        'LOAD DATA\n',
        TRUNC_OR_APPEND + '\n',
        'into table ' + tbl_name + '\n',
        "fields terminated by '" + delim + "'\n",
        'optionally Enclosed by \'"\'\n',
        'TRAILING NULLCOLS\n',
        '(\n',
        ',\n'.join(col_list),
        ')\n',
    ]
    with open(fname, 'w') as ct:
        ct.writelines(lines)
def info_to_datatype_v4(signed, little_endian):
    """Map a CAN signal description to an MDF integer data type.

    Parameters
    ----------
    signed : bool
        signal is flagged as signed in the CAN database
    little_endian : bool
        signal is flagged as little endian (Intel) in the CAN database

    Returns
    -------
    datatype : int
        integer code for MDF channel data type
    """
    if signed:
        return v4c.DATA_TYPE_SIGNED_INTEL if little_endian else v4c.DATA_TYPE_SIGNED_MOTOROLA
    return v4c.DATA_TYPE_UNSIGNED_INTEL if little_endian else v4c.DATA_TYPE_UNSIGNED_MOTOROLA
def pending_confirmations(self):
    """Return all published messages that have yet to be acked, nacked, or
    returned, ordered by delivery tag.

    :return: [(int, Published)]
    """
    pending = [
        (index, message)
        for index, message in enumerate(self.published_messages)
        if not message.future.done()
    ]
    pending.sort(key=lambda pair: pair[1].delivery_tag)
    return pending
def cmd_gimbal_point(self, args):
    '''control gimbal pointing'''
    if len(args) != 3:
        print("usage: gimbal point ROLL PITCH YAW")
        return
    (roll, pitch, yaw) = (float(args[0]), float(args[1]), float(args[2]))
    # mount_control takes centi-degrees, ordered pitch/roll/yaw.
    self.master.mav.mount_control_send(self.target_system, self.target_component, pitch * 100, roll * 100, yaw * 100, 0)
def cli_plugin_add_help(help):
    """Decorator generator that adds the cli help to the cli plugin based on the
    decorated function

    Args:
        help (str): help string for the cli plugin

    Returns:
        function: Decorator that builds or extends the cliplugin for the
            decorated function, setting the given help

    Examples:
        >>> @cli_plugin_add_help('my help string')
        ... def test(**kwargs):
        ...     print 'test'
        >>> print test.__class__
        <class 'cli.CLIPluginFuncWrapper'>
        >>> print test.help
        my help string

        >>> @cli_plugin_add_help('my help string')
        ... @cli_plugin()
        ... def test(**kwargs):
        ...     print 'test'
        >>> print test.__class__
        <class 'cli.CLIPluginFuncWrapper'>
        >>> print test.help
        my help string
    """
    def decorator(func):
        # Wrap plain functions so that help metadata can be attached
        # uniformly, whether or not @cli_plugin was applied first.
        if not isinstance(func, CLIPluginFuncWrapper):
            func = CLIPluginFuncWrapper(do_run=func)
        func.set_help(help)
        return func
    return decorator
def _draw_visible_area(self, painter):
    """Draw the visible area.

    This method does not take folded blocks into account.

    :type painter: QtGui.QPainter
    """
    if self.editor.visible_blocks:
        # First and last visible text blocks bound the highlighted rect.
        start = self.editor.visible_blocks[0][-1]
        end = self.editor.visible_blocks[-1][-1]
        rect = QtCore.QRect()
        rect.setX(0)
        rect.setY(start.blockNumber() * self.get_marker_height())
        rect.setWidth(self.sizeHint().width())
        rect.setBottom(end.blockNumber() * self.get_marker_height())
        # Pick a shade that contrasts with light vs. dark editor themes.
        if self.editor.background.lightness() < 128:
            c = self.editor.background.darker(150)
        else:
            c = self.editor.background.darker(110)
        c.setAlpha(128)
        painter.fillRect(rect, c)
def get_cover_image(self, size=SIZE_EXTRA_LARGE):
    """Returns a URI to the cover image.

    size can be one of:
        SIZE_EXTRA_LARGE
        SIZE_LARGE
        SIZE_MEDIUM
        SIZE_SMALL
    """
    # Lazily fetch and cache the image URL list on first access.
    if "image" not in self.info:
        self.info["image"] = _extract_all(self._request(self.ws_prefix + ".getInfo", cacheable=True), "image")
    return self.info["image"][size]
def rmod(self, other, axis="columns", level=None, fill_value=None):
    """Mod this DataFrame against another DataFrame/Series/scalar
    (with reversed operands).

    Args:
        other: The object to use to apply the mod against this.
        axis: The axis to mod over.
        level: The Multilevel index level to apply mod over.
        fill_value: The value to fill NaNs with.

    Returns:
        A new DataFrame with the rmod applied.
    """
    return self._binary_op("rmod", other, axis=axis, level=level, fill_value=fill_value)
def _log(file_list, list_name, in_path):
    """Log the discovery result at debug level."""
    joined_names = '\n'.join(file_list)
    LOG.debug(
        "\nDiscovered %(size)d %(name)s file(s) in %(path)s:\n"
        "%(files)s\n",
        {'size': len(file_list), 'name': list_name, 'path': in_path,
         'files': joined_names},
    )
def remove_service_listener(self, listener):
    """Unregisters a service listener.

    :param listener: The service listener
    :return: True if the listener has been unregistered
    """
    with self.__svc_lock:
        try:
            # Remove the listener's bookkeeping entry; KeyError means it
            # was never registered.
            data = self.__listeners_data.pop(listener)
            spec_listeners = self.__svc_listeners[data.specification]
            spec_listeners.remove(data)
            # Drop the specification bucket entirely once it is empty.
            if not spec_listeners:
                del self.__svc_listeners[data.specification]
            return True
        except KeyError:
            return False
def _extract_comments ( self ) :
"""Retrieve all comments from the file""" | self . _det_file . seek ( 0 , 0 )
for line in self . _det_file . readlines ( ) :
line = line . strip ( )
if line . startswith ( '#' ) :
self . add_comment ( line [ 1 : ] ) |
def get_or_create_calendar_for_object(self, obj, distinction='', name=None):
    """Return the calendar related to ``obj``, creating one if needed.

    >>> user = User(username="jeremy")
    >>> user.save()
    >>> calendar = Calendar.objects.get_or_create_calendar_for_object(user, name="Jeremy's Calendar")
    >>> calendar.name
    "Jeremy's Calendar"
    """
    try:
        return self.get_calendar_for_object(obj, distinction)
    except Calendar.DoesNotExist:
        # No calendar yet: create one (named after the object if no name
        # was given) and relate it to the object.
        if name is None:
            calendar = self.model(name=str(obj))
        else:
            calendar = self.model(name=name)
        calendar.slug = slugify(calendar.name)
        calendar.save()
        calendar.create_relation(obj, distinction)
        return calendar
def max(self, e, extra_constraints=(), exact=None):
    """Return the maximum value of expression `e`.

    :param e: expression (an AST) to evaluate
    :param extra_constraints: extra constraints (as ASTs) to add to the solver for this solve
    :param exact: if False, return approximate solutions.
    :return: the maximum possible value of e (backend object)
    """
    if exact is False and o.VALIDATE_APPROXIMATIONS in self.state.options:
        # Validation mode: compute both the approximate and the exact
        # maximum and assert the approximation never under-approximates.
        ar = self._solver.max(e, extra_constraints=self._adjust_constraint_list(extra_constraints), exact=False)
        er = self._solver.max(e, extra_constraints=self._adjust_constraint_list(extra_constraints))
        assert er <= ar
        return ar
    return self._solver.max(e, extra_constraints=self._adjust_constraint_list(extra_constraints), exact=exact)
def deploy(self, version_name, path, runtime_version=None):
    """Deploy a model version to the cloud.

    Args:
        version_name: the name of the version in short form, such as "v1".
        path: the Google Cloud Storage path (gs://...) which contains the model files.
        runtime_version: the ML Engine runtime version as a string, example '1.2'.
            See https://cloud.google.com/ml-engine/docs/concepts/runtime-version-list
            for a list of runtimes. If None, the ML Engine service will pick one.

    Raises:
        Exception if the path is invalid or does not contain expected files.
        Exception if the service returns invalid response.
    """
    if not path.startswith('gs://'):
        raise Exception('Invalid path. Only Google Cloud Storage path (gs://...) is accepted.')
    # If there is no "export.meta" or "saved_model.pb" under path but there is
    # path/model/export.meta or path/model/saved_model.pb, then append /model to the path.
    if not datalab.storage.Object.from_url(os.path.join(path, 'export.meta')).exists() and not datalab.storage.Object.from_url(os.path.join(path, 'saved_model.pb')).exists():
        if datalab.storage.Object.from_url(os.path.join(path, 'model', 'export.meta')).exists() or datalab.storage.Object.from_url(os.path.join(path, 'model', 'saved_model.pb')).exists():
            path = os.path.join(path, 'model')
        else:
            print('Cannot find export.meta or saved_model.pb, but continue with deployment anyway.')
    body = {'name': self._model_name}
    parent = 'projects/' + self._project_id
    try:
        self._api.projects().models().create(body=body, parent=parent).execute()
    except:
        # Trying to create an already existing model gets an error. Ignore it.
        pass
    body = {'name': version_name, 'deployment_uri': path, }
    if runtime_version:
        body['runtime_version'] = runtime_version
    response = self._api.projects().models().versions().create(body=body, parent=self._full_model_name).execute()
    if 'name' not in response:
        raise Exception('Invalid response from service. "name" is not found.')
    # Version creation is asynchronous; block until the operation finishes.
    _util.wait_for_long_running_operation(response['name'])
def fit(self, y):
    """Estimate censoring distribution from training data.

    Parameters
    ----------
    y : structured array, shape = (n_samples,)
        A structured array containing the binary event indicator
        as first field, and time of event or time of censoring as
        second field.

    Returns
    -------
    self
    """
    event, time = check_y_survival(y)
    if event.all():
        # No censoring at all: the censoring survival function is 1
        # at every observed time point.
        self.unique_time_ = numpy.unique(time)
        self.prob_ = numpy.ones(self.unique_time_.shape[0])
    else:
        # Kaplan-Meier estimate of the *censoring* distribution (~event).
        unique_time, prob = kaplan_meier_estimator(~event, time)
        # `numpy.infty` was removed in NumPy 2.0; `numpy.inf` is the
        # canonical spelling.
        self.unique_time_ = numpy.concatenate(([-numpy.inf], unique_time))
        self.prob_ = numpy.concatenate(([1.], prob))
    return self
def __get_switch_arr(work_sheet, row_num):
    '''Collect the slug names of switch columns whose cell value is `1`
    in the given row.'''
    slugs = []
    for col_idx in FILTER_COLUMNS:
        cell_val = work_sheet['{0}{1}'.format(col_idx, row_num)].value
        if cell_val in [1, '1']:
            # The header cell (row 1) holds "slug,..." — keep the slug only.
            header = work_sheet['{0}1'.format(col_idx)].value
            slugs.append(header.strip().split(',')[0])
    return slugs
def p_funcvardecl(self, p):
    """funcvardecl : decl
    | integerdecl"""
    # NOTE: the docstring above is the PLY grammar rule — do not edit it.
    # Only input/reg/integer declarations are legal inside a function body.
    if isinstance(p[1], Decl):
        for r in p[1].list:
            if (not isinstance(r, Input) and not isinstance(r, Reg) and not isinstance(r, Integer)):
                raise ParseError("Syntax Error")
    p[0] = p[1]
    p.set_lineno(0, p.lineno(1))
def setpassword(self, pwd):
    """Set default password for encrypted files."""
    if pwd and not isinstance(pwd, bytes):
        raise TypeError("pwd: expected bytes, got %s" % type(pwd))
    # A falsy password (None or b"") clears the stored default.
    self.pwd = pwd if pwd else None
def human_or_00(X, y, model_generator, method_name):
    """OR (false/false)

    This tests how well a feature attribution method agrees with human intuition
    for an OR operation combined with linear effects. This metric deals
    specifically with the question of credit allocation for the following function
    when all three inputs are true:
    if fever: +2 points
    if cough: +2 points
    if fever or cough: +6 points

    transform = "identity"
    sort_order = 0
    """
    # Delegate to the shared OR-scenario scorer with fever=False, cough=False.
    return _human_or(X, model_generator, method_name, False, False)
def unmerge(self, unmerge_area, tab):
    """Unmerge all cells inside ``unmerge_area`` on table ``tab``."""
    top, left, bottom, right = unmerge_area
    cell_selection = Selection([(top, left)], [(bottom, right)], [], [], [])
    # Clearing the merge area also unlocks the previously merged cells.
    self._set_cell_attr(cell_selection, tab, {"merge_area": None, "locked": False})
def dfa_minimization(dfa: dict) -> dict:
    """Returns the minimization of the DFA in input through a
    greatest fix-point method.

    Given a completed DFA :math:`A = (Σ, S, s_0, ρ, F)` there
    exists a single minimal DFA :math:`A_m`
    which is equivalent to A, i.e. reads the same language
    :math:`L(A) = L(A_m)` and with a minimal number of states.
    To construct such a DFA we exploit bisimulation as a suitable
    equivalence relation between states.

    A bisimulation relation :math:`E ∈ S × S` is a relation
    between states that satisfies the following condition:
    if :math:`(s, t) ∈ E` then:

    • s ∈ F iff t ∈ F;
    • For all :math:`(s_X, a)` such that :math:`ρ(s, a) = s_X`,
      there exists :math:`t_X` such that :math:`ρ(t, a) = t_X`
      and :math:`(s_X, t_X) ∈ E`;
    • For all :math:`(t_X, a)` such that :math:`ρ(t, a) = t_X`,
      there exists :math:`s_X` such that :math:`ρ(s, a) = s_X`
      and :math:`(s_X, t_X) ∈ E`.

    :param dict dfa: input DFA.
    :return: *(dict)* representing the minimized DFA.
    """
    # Work on a completed copy so the input DFA is left untouched.
    dfa = dfa_completion(deepcopy(dfa))
    ### Greatest-fixpoint
    z_current = set()
    z_next = set()
    # First bisimulation condition check (can be done just once):
    # s ∈ F iff t ∈ F
    for state_s in dfa['states']:
        for state_t in dfa['states']:
            if (state_s in dfa['accepting_states'] and state_t in dfa['accepting_states']) or (state_s not in dfa['accepting_states'] and state_t not in dfa['accepting_states']):
                z_next.add((state_s, state_t))
    # Second and third condition of bisimularity check: iterate until the
    # relation stops shrinking (greatest fix-point).
    while z_current != z_next:
        z_current = z_next
        z_next = z_current.copy()
        for (state_1, state_2) in z_current:
            # for all s0, a s.t. ρ(s, a) = s_0, there exists t0
            # s.t. ρ(t, a) = t0 and (s_0, t0) ∈ Zi;
            for a in dfa['alphabet']:
                if (state_1, a) in dfa['transitions'] and (state_2, a) in dfa['transitions']:
                    if (dfa['transitions'][state_1, a], dfa['transitions'][state_2, a]) not in z_current:
                        z_next.remove((state_1, state_2))
                        break
                else:
                    # action a not possible in state element[0]
                    # or element[1]
                    z_next.remove((state_1, state_2))
                    break
    ### Equivalence Sets
    equivalence = dict()
    for (state_1, state_2) in z_current:
        equivalence.setdefault(state_1, set()).add(state_2)
    ### Minimal DFA construction
    dfa_min = {'alphabet': dfa['alphabet'].copy(), 'states': set(), 'initial_state': dfa['initial_state'], 'accepting_states': set(), 'transitions': dfa['transitions'].copy()}
    # select one element for each equivalence set
    for equivalence_set in equivalence.values():
        if dfa_min['states'].isdisjoint(equivalence_set):
            e = equivalence_set.pop()
            dfa_min['states'].add(e)
            # TODO highlight this instruction
            equivalence_set.add(e)
    dfa_min['accepting_states'] = dfa_min['states'].intersection(dfa['accepting_states'])
    # Rewire transitions so they only mention the chosen representatives.
    for t in dfa['transitions']:
        if t[0] not in dfa_min['states']:
            dfa_min['transitions'].pop(t)
        elif dfa['transitions'][t] not in dfa_min['states']:
            dfa_min['transitions'][t] = equivalence[dfa['transitions'][t]].intersection(dfa_min['states']).pop()
    return dfa_min
def _cont_to_discrete_object(cls, X, F, L, Qc, compute_derivatives=False, grad_params_no=None, P_inf=None, dP_inf=None, dF=None, dQc=None, dt0=None):
    """Return the object used by the Kalman filter and/or smoother to obtain
    the discrete-model matrices A, Q (and optionally their derivatives) from
    the continuous model.

    Two helper classes exist — AQcompute_once and AQcompute_batch — and this
    function chooses between them based on how many distinct time steps the
    data contains.

    Input:
        X, F, L, Qc: matrices
            Continuous model matrices; X provides the time axis in its
            first column.
        compute_derivatives: boolean
            Whether to compute derivatives.
        grad_params_no: int
            Number of parameters in the gradient.
        P_inf, dP_inf, dF, dQc: matrices and 3D objects
            Data necessary to compute derivatives.
        dt0: None, str or number
            How to fill the first time step: None -> 0, a number -> that
            value, a string -> drop the first step entirely.
    Output:
        AQcomp: object
            Its methods return matrices (and optionally derivatives) for
            the discrete state-space model.
    """
    # Decimal precision used to decide whether two time steps count as equal.
    unique_round_decimals = 10
    # Above this many distinct steps, matrices are computed separately each time.
    threshold_number_of_unique_time_steps = 20
    # Time differences between consecutive samples; dt[0] is filled below.
    dt = np.empty((X.shape[0],))
    dt[1:] = np.diff(X[:, 0], axis=0)
    if dt0 is None:
        dt[0] = 0
        # dt[1]
    else:
        if isinstance(dt0, str):
            # Any string value means: drop the first time step altogether.
            dt = dt[1:]
        else:
            dt[0] = dt0
    # NOTE: despite the name, these are the unique (rounded) dt VALUES.
    unique_indices = np.unique(np.round(dt, decimals=unique_round_decimals))
    number_unique_indices = len(unique_indices)
    # import pdb; pdb.set_trace()
    if use_cython:
        # Thin wrapper that precomputes all A/Q matrices and feeds them to the
        # Cython batch implementation.
        class AQcompute_batch(state_space_cython.AQcompute_batch_Cython):
            def __init__(self, F, L, Qc, dt, compute_derivatives=False, grad_params_no=None, P_inf=None, dP_inf=None, dF=None, dQc=None):
                As, Qs, reconstruct_indices, dAs, dQs = ContDescrStateSpace.lti_sde_to_descrete(F, L, Qc, dt, compute_derivatives, grad_params_no=grad_params_no, P_inf=P_inf, dP_inf=dP_inf, dF=dF, dQc=dQc)
                super(AQcompute_batch, self).__init__(As, Qs, reconstruct_indices, dAs, dQs)
    else:
        AQcompute_batch = cls.AQcompute_batch_Python
    if number_unique_indices > threshold_number_of_unique_time_steps:
        # Too many distinct steps: compute matrices on the fly at each step.
        AQcomp = cls.AQcompute_once(F, L, Qc, dt, compute_derivatives=compute_derivatives, grad_params_no=grad_params_no, P_inf=P_inf, dP_inf=dP_inf, dF=dF, dQc=dQc)
        if print_verbose:
            print("CDO: Continue-to-discrete INSTANTANEOUS object is created.")
            print("CDO: Number of different time steps: %i" % (number_unique_indices,))
    else:
        # Few distinct steps: precompute everything in a batch.
        AQcomp = AQcompute_batch(F, L, Qc, dt, compute_derivatives=compute_derivatives, grad_params_no=grad_params_no, P_inf=P_inf, dP_inf=dP_inf, dF=dF, dQc=dQc)
        if print_verbose:
            print("CDO: Continue-to-discrete BATCH object is created.")
            print("CDO: Number of different time steps: %i" % (number_unique_indices,))
            print("CDO: Total size if its data: %i" % (AQcomp.total_size_of_data,))
    return AQcomp
async def read(self, *_id):
    """Read data from database table.
    Accepts ids of entries.
    Returns list of results if success
    or string with error code and explanation.
    read(*id) => [(result), (result)] (if success)
    read(*id) => [] (if missed)
    read() => {"error": 400, "reason": "Missed required fields"}
    """
    if not _id:
        return {"error": 400, "reason": "Missed required fields"}
    result = []
    for entry_id in _id:
        document = await self.collection.find_one({"id": entry_id})
        # Missing ids are skipped, matching the documented "read(*id) => []"
        # behavior.  (The original used a bare except for this, which also
        # hid genuine errors; a missed lookup returns None from find_one.)
        if document is None:
            continue
        # Strip Mongo's internal "_id" field from the returned document.
        # NOTE: a distinct loop variable is used here — the original shadowed
        # the outer id variable inside this comprehension.
        result.append({key: document[key] for key in document if key != "_id"})
    return result
def analyse_ligand_sasa(self):
    """Analysis of ligand SASA (solvent accessible surface area).

    Loads each trajectory with mdtraj, computes Shrake-Rupley SASA for the
    non-hydrogen ligand atoms, and stores per-atom and total results on self.
    """
    i = 0
    start = timer()
    # No trajectory given: fall back to the topology file itself (single frame).
    if self.trajectory == []:
        self.trajectory = [self.topology_data.universe.filename]
    try:
        for traj in self.trajectory:
            new_traj = mdtraj.load(traj, top=self.topology_data.universe.filename)
            # Analyse only non-H ligand atoms
            ligand_slice = new_traj.atom_slice(atom_indices=self.topology_data.universe.ligand_noH.ids)
            self.sasa = mdtraj.shrake_rupley(ligand_slice)
            self.atom_sasa[i] = self.assign_per_atom_sasa()
            i += 1
        self.total_sasa = self.get_total_per_atom_sasa()
    except KeyError as e:
        # Fixed: the original used Python-2 print statements, which are a
        # syntax error under Python 3 (the surrounding file uses async def,
        # i.e. py3-only syntax).
        print("WARNING: SASA analysis cannot be performed due to incorrect atom names in")
        print("the topology ", e)
    print("SASA: " + str(timer() - start))
def get_header(message, name):
    """Return the decoded value of a mail header.

    Args:
        message (email.message.Message): email message object
        name (string): header to get
    Returns:
        the header decoded with the correct charset, or an empty
        text string when the header is absent/empty
    """
    raw_value = message.get(name)
    log.debug("Getting header {!r}: {!r}".format(name, raw_value))
    if not raw_value:
        return six.text_type()
    return decode_header_part(raw_value)
def process_request(self, request):
    """Setup the profiler for a profiling run and clear the SQL query log.
    If this is a resort of an existing profiling run, just return
    the resorted list.
    """
    # Rebuild (stats, queries) from the base64/pickled form parameters of a
    # previous profiling run.
    def unpickle(params):
        stats = unpickle_stats(b64decode(params.get('stats', '')))
        queries = cPickle.loads(b64decode(params.get('queries', '')))
        return stats, queries
    # Only handle GETs and ordinary form posts; anything else passes through.
    if request.method != 'GET' and not (request.META.get('HTTP_CONTENT_TYPE', request.META.get('CONTENT_TYPE', '')) in ['multipart/form-data', 'application/x-www-form-urlencoded']):
        return
    # Profiling is opt-in via ?profile=... and restricted to DEBUG or staff.
    if (request.REQUEST.get('profile', False) and (settings.DEBUG == True or request.user.is_staff)):
        # Temp file that hotshot will write profile data into.
        request.statsfile = tempfile.NamedTemporaryFile()
        params = request.REQUEST
        if (params.get('show_stats', False) and params.get('show_queries', '1') == '1'):
            # Instantly re-sort the existing stats data
            stats, queries = unpickle(params)
            return display_stats(request, stats, queries)
        elif (params.get('show_queries', False) and params.get('show_stats', '1') == '1'):
            stats, queries = unpickle(params)
            return display_queries(request, stats, queries)
        else:
            # We don't have previous data, so initialize the profiler
            request.profiler = hotshot.Profile(request.statsfile.name)
            reset_queries()
def _http_request(url, method='GET', headers=None, data=None):
    '''Make the HTTP request and return the body as python object.'''
    response = requests.request(method, url, headers=headers, data=data)
    ret = _default_ret()
    # Each HTTP method has its own expected success status (default 200).
    expected_status = METHOD_OK_STATUS.get(method, 200)
    if response.status_code == expected_status:
        # DELETE responses carry no body.
        body = None if method == 'DELETE' else response.json()
        ret.update({'result': True, 'out': body})
    else:
        ret.update({'comment': response.json().get('error', '')})
    return ret
def main(args=sys.argv):
    """Run the work() method from the class instance in the file "job-instance.pickle"."""
    try:
        # Set up logging.
        logging.basicConfig(level=logging.WARN)
        work_dir = args[1]
        assert os.path.exists(work_dir), "First argument to lsf_runner.py must be a directory that exists"
        do_work_on_compute_node(work_dir)
    except Exception as error:
        # Surface the failure on stdout before propagating it.
        print(error)
        raise
def add_ip_scope(name, description, auth, url, startip=None, endip=None, network_address=None):
    """Add a new IP scope to terminal access in the HPE IMC base platform.

    :param name: str Name of the owner of this IP scope ex. 'admin'
    :param description: str description of the IP scope
    :param auth: requests auth object #usually auth.creds from pyhpeimc.auth class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth class
    :param startip: str Start of IP address scope ex. '10.101.0.1'
    :param endip: str End of IP address scope ex. '10.101.0.254'
    :param network_address: ipv4 network address + subnet bits of target scope
    :return: 200 if successful, 409 if the scope already exists,
             an error string on a request failure
    :rtype: int

    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.termaccess import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> new_scope = add_ip_scope('cyoung', 'test group', auth.creds, auth.url, '10.50.0.1', '10.50.0.254')
    >>> assert type(new_scope) is int
    >>> assert new_scope == 200
    """
    if network_address is not None:
        # Derive start/end from the network: first and last usable host.
        nw_address = ipaddress.IPv4Network(network_address)
        startip = nw_address[1]
        endip = nw_address[-2]
    f_url = url + "/imcrs/res/access/assignedIpScope"
    payload = ('''{ "startIp": "%s", "endIp": "%s","name": "%s","description": "%s" }''' % (str(startip), str(endip), str(name), str(description)))
    # Fixed: the POST itself (the only call that can raise RequestException)
    # is now inside the try block; previously it sat outside, making the
    # except clause unreachable.
    try:
        response = requests.post(f_url, auth=auth, headers=HEADERS, data=payload)
        if response.status_code == 200:
            # IP scope successfully created
            return response.status_code
        elif response.status_code == 409:
            # IP scope already exists
            return response.status_code
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " add_ip_scope: An Error has occured"
def set_default_decoder_parameters(dparams_p):
    """Wrapper for opj_set_default_decoder_parameters.

    Declares the ctypes argument signature and calls into the OpenJPEG
    library to fill the pointed-to structure with default decode parameters.

    Parameters
    ----------
    dparams_p : ctypes.POINTER(DecompressionParametersType)
        Pointer to the decompression-parameters struct to initialize in place.
    """
    # Declare the C prototype before calling so ctypes converts correctly.
    argtypes = [ctypes.POINTER(DecompressionParametersType)]
    OPENJPEG.opj_set_default_decoder_parameters.argtypes = argtypes
    OPENJPEG.opj_set_default_decoder_parameters(dparams_p)
def _num_required_args ( func ) :
"""Number of args for func
> > > def foo ( a , b , c = None ) :
. . . return a + b + c
> > > _ num _ required _ args ( foo )
> > > def bar ( * args ) :
. . . return sum ( args )
> > > print ( _ num _ required _ args ( bar ) )
None
borrowed from : https : / / github . com / pytoolz / toolz""" | try :
spec = inspect . getargspec ( func )
if spec . varargs :
return None
num_defaults = len ( spec . defaults ) if spec . defaults else 0
return len ( spec . args ) - num_defaults
except TypeError :
return None |
def equal(obj1, obj2):
    """Calculate equality between two (Comparable) objects.

    Logs the comparison before and after delegating to obj1.equality(obj2).
    """
    Comparable.log(obj1, obj2, '==')
    outcome = obj1.equality(obj2)
    Comparable.log(obj1, obj2, '==', result=outcome)
    return outcome
def replace(self, *args, **kargs):
    """lst.replace(<field>, [<oldvalue>,] <newvalue>)
    lst.replace((fld, [ov,] nv), (fld, [ov,] nv), ...)
    if ov is None, all values are replaced
    ex:
    lst.replace(IP.src, "192.168.1.1", "10.0.0.1")
    lst.replace(IP.ttl, 64)
    lst.replace((IP.ttl, 64), (TCP.sport, 666, 777),)

    Returns a new PacketList; the packets in self are left untouched
    (copy-on-write: a packet is only copied when a field actually changes).
    """
    delete_checksums = kargs.get("delete_checksums", False)
    x = PacketList(name="Replaced %s" % self.listname)
    # Single-scheme call form: wrap the bare args into a one-element tuple.
    if not isinstance(args[0], tuple):
        args = (args,)
    for p in self.res:
        p = self._elt2pkt(p)
        copied = False
        for scheme in args:
            fld = scheme[0]
            old = scheme[1]
            # not used if len(scheme) == 2
            new = scheme[-1]
            # A field may belong to several layers; check each owner layer.
            for o in fld.owners:
                if o in p:
                    # 2-element scheme => unconditional replace; otherwise
                    # only replace when the current value matches `old`.
                    if len(scheme) == 2 or p[o].getfieldval(fld.name) == old:  # noqa: E501
                        if not copied:
                            # Copy lazily so unmodified packets are shared.
                            p = p.copy()
                            if delete_checksums:
                                p.delete_checksums()
                            copied = True
                        setattr(p[o], fld.name, new)
        x.append(p)
    return x
def update(self):
    """Update the processes stats.

    Rebuilds self.processlist from psutil, applies OS/kernel/user filters,
    sorts it, then annotates each entry with extended stats (top process
    only), status normalization and IO rate bookkeeping.
    """
    # Reset the stats
    self.processlist = []
    self.reset_processcount()
    # Do not process if disable tag is set
    if self.disable_tag:
        return
    # Time since last update (for disk_io rate computation)
    time_since_update = getTimeSinceLastUpdate('process_disk')
    # Grab standard stats
    # NOTE(review): 'status' appears twice in this list — presumably a
    # harmless duplicate (psutil de-duplicates attrs); confirm before removing.
    standard_attrs = ['cmdline', 'cpu_percent', 'cpu_times', 'memory_info', 'memory_percent', 'name', 'nice', 'pid', 'ppid', 'status', 'username', 'status', 'num_threads']
    # io_counters availability: Linux, BSD, Windows, AIX
    if not MACOS and not SUNOS:
        standard_attrs += ['io_counters']
    # gids availability: Unix
    if not WINDOWS:
        standard_attrs += ['gids']
    # and build the processes stats list (psutil >= 5.3.0)
    self.processlist = [p.info for p in psutil.process_iter(attrs=standard_attrs, ad_value=None)
                        # OS-related processes filter
                        if not (BSD and p.info['name'] == 'idle') and
                        not (WINDOWS and p.info['name'] == 'System Idle Process') and
                        not (MACOS and p.info['name'] == 'kernel_task') and
                        # Kernel threads filter
                        not (self.no_kernel_threads and LINUX and p.info['gids'].real == 0) and
                        # User filter
                        not (self._filter.is_filtered(p.info))]
    # Sort the processes list by the current sort_key
    self.processlist = sort_stats(self.processlist, sortedby=self.sort_key, reverse=True)
    # Update the processcount
    self.update_processcount(self.processlist)
    # Loop over processes and add metadata
    first = True
    for proc in self.processlist:
        # Get extended stats, only for top processes (see issue #403).
        if first and not self.disable_extended_tag:
            # - cpu_affinity (Linux, Windows, FreeBSD)
            # - ionice (Linux and Windows > Vista)
            # - num_ctx_switches (not available on Illumos/Solaris)
            # - num_fds (Unix-like)
            # - num_handles (Windows)
            # - memory_maps (only swap, Linux)
            #   https://www.cyberciti.biz/faq/linux-which-process-is-using-swap/
            # - connections (TCP and UDP)
            extended = {}
            try:
                top_process = psutil.Process(proc['pid'])
                extended_stats = ['cpu_affinity', 'ionice', 'num_ctx_switches']
                if LINUX:
                    # num_fds only avalable on Unix system (see issue #1351)
                    extended_stats += ['num_fds']
                if WINDOWS:
                    extended_stats += ['num_handles']
                # Get the extended stats
                extended = top_process.as_dict(attrs=extended_stats, ad_value=None)
                if LINUX:
                    try:
                        extended['memory_swap'] = sum([v.swap for v in top_process.memory_maps()])
                    except psutil.NoSuchProcess:
                        pass
                    except (psutil.AccessDenied, NotImplementedError):
                        # NotImplementedError: /proc/${PID}/smaps file doesn't exist
                        # on kernel < 2.6.14 or CONFIG_MMU kernel configuration option
                        # is not enabled (see psutil #533 / glances #413).
                        extended['memory_swap'] = None
                try:
                    extended['tcp'] = len(top_process.connections(kind="tcp"))
                    extended['udp'] = len(top_process.connections(kind="udp"))
                except (psutil.AccessDenied, psutil.NoSuchProcess):
                    # Manage issue1283 (psutil.AccessDenied)
                    extended['tcp'] = None
                    extended['udp'] = None
            except (psutil.NoSuchProcess, ValueError, AttributeError) as e:
                logger.error('Can not grab extended stats ({})'.format(e))
                extended['extended_stats'] = False
            else:
                logger.debug('Grab extended stats for process {}'.format(proc['pid']))
                extended['extended_stats'] = True
            proc.update(extended)
        first = False
        # /End of extended stats
        # Time since last update (for disk_io rate computation)
        proc['time_since_update'] = time_since_update
        # Process status (only keep the first char)
        proc['status'] = str(proc['status'])[:1].upper()
        # Process IO
        # procstat['io_counters'] is a list:
        # [read_bytes, write_bytes, read_bytes_old, write_bytes_old, io_tag]
        # If io_tag = 0 > Access denied or first time (display "?")
        # If io_tag = 1 > No access denied (display the IO rate)
        if 'io_counters' in proc and proc['io_counters'] is not None:
            io_new = [proc['io_counters'].read_bytes, proc['io_counters'].write_bytes]
            # For IO rate computation
            # Append saved IO r/w bytes
            try:
                proc['io_counters'] = io_new + self.io_old[proc['pid']]
                io_tag = 1
            except KeyError:
                # First time we see this pid: no previous counters yet.
                proc['io_counters'] = io_new + [0, 0]
                io_tag = 0
            # then save the IO r/w bytes
            self.io_old[proc['pid']] = io_new
        else:
            proc['io_counters'] = [0, 0] + [0, 0]
            io_tag = 0
        # Append the IO tag (for display)
        proc['io_counters'] += [io_tag]
    # Compute the maximum value for keys in self._max_values_list: CPU, MEM
    # Usefull to highlight the processes with maximum values
    for k in self._max_values_list:
        values_list = [i[k] for i in self.processlist if i[k] is not None]
        if values_list != []:
            self.set_max_values(k, max(values_list))
def encrypt_file(src, dest, csv_keys):
    """Encrypt a file with the specific GPG keys and write out
    to the specified path.

    csv_keys is a comma-separated string of key identifiers.
    """
    key_list = massage_keys(csv_keys.split(','))
    cryptorito.encrypt(src, dest, key_list)
def _get_store_by_name(self, name):
    """Return an instance of the correct DiskRepository based on the *first*
    file that matches the standard syntax for repository files.

    :param name: cluster name to look up
    :raises ClusterNotFound: when no storage type has a matching file
        (or every matching type failed to instantiate)
    """
    for cls in self.storage_type_map.values():
        cluster_files = glob.glob('%s/%s.%s' % (self.storage_path, name, cls.file_ending))
        if not cluster_files:
            continue
        try:
            return cls(self.storage_path)
        except Exception:
            # Fixed: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. A failed backend simply falls
            # through to the next storage type.
            continue
    raise ClusterNotFound("No cluster %s was found" % name)
def save_df_state(df_state: pd.DataFrame, site: str = '', path_dir_save: Path = Path('.'),) -> Path:
    '''save `df_state` to a csv file

    Parameters
    ----------
    df_state : pd.DataFrame
        a dataframe of model states produced by a supy run
    site : str, optional
        site identifier (the default is '', which indicates an empty site code)
    path_dir_save : Path, optional
        path to directory to save results (the default is Path('.'), i.e. the
        current working directory)

    Returns
    -------
    Path
        path to the saved csv file
    '''
    csv_name = 'df_state_{site}.csv'.format(site=site)
    # An empty site code would leave a trailing underscore — trim it.
    csv_name = csv_name.replace('_.csv', '.csv')
    path_state_save = path_dir_save / csv_name
    print('writing out: {path_out}'.format(path_out=path_state_save))
    df_state.to_csv(path_state_save)
    return path_state_save
def iter_query(query):
    """Accept a filename, stream, or string.
    Returns an iterator over lines of the query.
    """
    try:
        # Treat the argument as something click can open (filename, '-', ...).
        return click.open_file(query).readlines()
    except IOError:
        # Not openable: assume it is the query text itself.
        return [query]
async def deleteMessage(self, msg_identifier):
    """See: https://core.telegram.org/bots/api#deletemessage

    :param msg_identifier:
        Same as ``msg_identifier`` in :meth:`telepot.aio.Bot.editMessageText`,
        except this method does not work on inline messages.
    """
    # NOTE: _strip reads locals() — the parameter names themselves become the
    # request payload keys, so they must not be renamed.
    p = _strip(locals(), more=['msg_identifier'])
    # Expand the identifier into the chat_id/message_id pair the API expects.
    p.update(_dismantle_message_identifier(msg_identifier))
    return await self._api_request('deleteMessage', _rectify(p))
def start(self):
    """This method must be called immediately after the class is instantiated.
    It instantiates the serial (or socket) interface and then performs auto
    pin discovery.
    It is intended for use by pymata3 applications that do not
    use asyncio coroutines directly.

    :returns: No return value.
    """
    # check if user specified a socket transport
    if self.ip_address:
        self.socket = PymataSocket(self.ip_address, self.ip_port, self.loop)
        self.loop.run_until_complete((self.socket.start()))
        # set the read and write handles
        self.read = self.socket.read
        self.write = self.socket.write
        # Consume the expected handshake bytes from the remote end.
        for i in range(0, len(self.ip_handshake)):
            self.loop.run_until_complete((self.read()))
    else:
        try:
            self.serial_port = PymataSerial(self.com_port, 57600, self.sleep_tune, self.log_output)
            # set the read and write handles
            self.read = self.serial_port.read
            self.write = self.serial_port.write
        except serial.SerialException:
            if self.log_output:
                log_string = 'Cannot instantiate serial interface: ' + self.com_port
                logging.exception(log_string)
            else:
                print('Cannot instantiate serial interface: ' + self.com_port)
                print('To see a list of serial ports, type: "list_serial_ports" in your console.')
            sys.exit(0)
    # wait for arduino to go through a reset cycle if need be
    time.sleep(self.arduino_wait)
    # register the get_command method with the event loop
    # self.loop = asyncio.get_event_loop()
    self.the_task = self.loop.create_task(self._command_dispatcher())
    # get arduino firmware version and print it
    try:
        firmware_version = self.loop.run_until_complete(self.get_firmware_version())
        if self.log_output:
            log_string = "\nArduino Firmware ID: " + firmware_version
            logging.exception(log_string)
        else:
            print("\nArduino Firmware ID: " + firmware_version)
    except TypeError:
        # firmware_version came back None — most likely no Firmata sketch.
        print('\nIs your serial cable plugged in and do you have the correct Firmata sketch loaded?')
        print('Is the COM port correct?')
        print('To see a list of serial ports, type: "list_serial_ports" in your console.')
        sys.exit(0)
    # try to get an analog pin map. if it comes back as none - shutdown
    report = self.loop.run_until_complete(self.get_analog_map())
    if not report:
        if self.log_output:
            log_string = '*** Analog map retrieval timed out. ***'
            logging.exception(log_string)
            log_string = '\nDo you have Arduino connectivity and do you ' 'have a Firmata sketch uploaded to the board?'
            logging.exception(log_string)
        else:
            print('*** Analog map retrieval timed out. ***')
            print('\nDo you have Arduino connectivity and do you have a ' 'Firmata sketch uploaded to the board?')
        # Best-effort shutdown of the event loop before exiting.
        try:
            loop = self.loop
            for t in asyncio.Task.all_tasks(loop):
                t.cancel()
            loop.run_until_complete(asyncio.sleep(.1))
            loop.close()
            loop.stop()
            sys.exit(0)
        except RuntimeError:
            # this suppresses the Event Loop Is Running message, which may
            # be a bug in python 3
            sys.exit(0)
        except TypeError:
            sys.exit(0)
    # custom assemble the pin lists
    # Each entry in `report` describes one pin; a value of Constants.IGNORE
    # marks a digital-only pin (no analog channel).
    for pin in report:
        digital_data = PinData()
        self.digital_pins.append(digital_data)
        if pin != Constants.IGNORE:
            analog_data = PinData()
            self.analog_pins.append(analog_data)
    if self.log_output:
        log_string = 'Auto-discovery complete. Found ' + str(len(self.digital_pins)) + ' Digital Pins and ' + str(len(self.analog_pins)) + ' Analog Pins'
        logging.info(log_string)
    else:
        print('{} {} {} {} {}'.format('Auto-discovery complete. Found', len(self.digital_pins), 'Digital Pins and', len(self.analog_pins), 'Analog Pins\n\n'))
    # Analog pins are reported after digital pins, so the first analog pin
    # number is the difference of the two counts.
    self.first_analog_pin = len(self.digital_pins) - len(self.analog_pins)
def __get_type_from_char(self, c):
    """Classify the upcoming EDN value from its leading character.

    Returns a 3-tuple:
        * type name (or None when unrecognized)
        * flag: is the value a collection
        * closing delimiter for collections (None otherwise)
    """
    if c.isdigit() or c == '-':
        return ("number", False, None)
    if c in ('t', 'f'):
        # true / false
        return ("boolean", False, None)
    if c == 'n':
        # nil
        return ("nil", False, None)
    if c == '\\':
        return ("char", False, None)
    if c == ':':
        return ("keyword", False, None)
    if c == '"':
        return ("string", False, None)
    if c == '#':
        # '#' is a dispatch character: peek ahead to disambiguate.
        if self.__read_and_back(1) == '{':
            return ("set", True, "}")
        if self.__read_and_back(1) == ':':
            return ("namespaced_dict", True, "}")
        if self.__read_and_back(4) == 'inst':
            return ("datetime", False, None)
        if self.__read_and_back(4) == 'uuid':
            return ("uuid", False, None)
        return (None, False, None)
    if c == '{':
        return ("dict", True, "}")
    if c == '(':
        return ("list", True, ")")
    if c == '[':
        return ('list', True, "]")
    return (None, False, None)
def _build(self, one_hot_input_sequence):
    """Builds the deep LSTM model sub-graph.

    Args:
        one_hot_input_sequence: A Tensor with the input sequence encoded as a
            one-hot representation. Its dimensions should be
            `[truncation_length, batch_size, output_size]`.
    Returns:
        Tuple of the Tensor of output logits for the batch, with dimensions
        `[truncation_length, batch_size, output_size]`, and the
        final state of the unrolled core.
    """
    input_shape = one_hot_input_sequence.get_shape()
    # Time-major layout: dimension 1 is the batch.
    batch_size = input_shape[1]
    # BatchApply folds the time dimension into the batch so the embed module
    # is applied to every timestep at once.
    batch_embed_module = snt.BatchApply(self._embed_module)
    input_sequence = batch_embed_module(one_hot_input_sequence)
    input_sequence = tf.nn.relu(input_sequence)
    initial_state = self._core.initial_state(batch_size)
    if self._use_dynamic_rnn:
        output_sequence, final_state = tf.nn.dynamic_rnn(cell=self._core, inputs=input_sequence, time_major=True, initial_state=initial_state)
    else:
        # Static unroll: split the time axis into a Python list of tensors.
        rnn_input_sequence = tf.unstack(input_sequence)
        output, final_state = tf.contrib.rnn.static_rnn(cell=self._core, inputs=rnn_input_sequence, initial_state=initial_state)
        output_sequence = tf.stack(output)
    batch_output_module = snt.BatchApply(self._output_module)
    output_sequence_logits = batch_output_module(output_sequence)
    return output_sequence_logits, final_state
def generate_cache_key(value):
    """Generates a cache key for the *args and **kwargs.

    Bytes hash directly via md5; every other supported type is reduced
    to bytes recursively. Unsupported types raise TypeError.
    """
    if is_bytes(value):
        return hashlib.md5(value).hexdigest()
    if is_text(value):
        return generate_cache_key(to_bytes(text=value))
    if is_boolean(value) or is_null(value) or is_number(value):
        return generate_cache_key(repr(value))
    if is_dict(value):
        # Sort keys so equal dicts always produce the same key.
        sorted_items = ((key, value[key]) for key in sorted(value.keys()))
        return generate_cache_key(sorted_items)
    if is_list_like(value) or isinstance(value, collections.abc.Generator):
        return generate_cache_key("".join(generate_cache_key(item) for item in value))
    raise TypeError("Cannot generate cache key for value {0} of type {1}".format(value, type(value),))
def remove_all_callbacks(self):
    """Remove all callbacks registered to the shortcut manager.

    Unregisters every callback for every action, then clears the
    local registry.
    :return:
    """
    for action, callbacks in self.registered_shortcut_callbacks.items():
        for callback in callbacks:
            self.shortcut_manager.remove_callback_for_action(action, callback)
    # forget everything that was registered
    self.registered_shortcut_callbacks = {}
def buffer_write(self, data, dtype):
    """Write audio data from a buffer/bytes object to the file.

    Writes the contents of `data` to the file at the current
    read/write position.
    This also advances the read/write position by the number of
    frames that were written and enlarges the file if necessary.

    Parameters
    ----------
    data : buffer or bytes
        A buffer or bytes object containing the audio data to be
        written.
    dtype : {'float64', 'float32', 'int32', 'int16'}
        The data type of the audio data stored in `data`.

    See Also
    --------
    .write, buffer_read
    """
    # Map the dtype string to the corresponding C type.
    ctype = self._check_dtype(dtype)
    # Validate the buffer and derive the frame count from its length.
    cdata, frames = self._check_buffer(data, ctype)
    written = self._cdata_io('write', cdata, ctype, frames)
    # A short write would mean a library-level failure.
    assert written == frames
    self._update_frames(written)
def sg_lookup(tensor, opt):
    r"""Looks up the `tensor`, which is the embedding matrix.

    Args:
        tensor: A tensor (automatically given by chain)
        opt:
            emb: A 2-D `Tensor`. An embedding matrix.
            name: If provided, replace current tensor's name.
    Returns:
        A `Tensor`.
    """
    assert opt.emb is not None, 'emb is mandatory.'
    embedding_matrix = opt.emb
    return tf.nn.embedding_lookup(embedding_matrix, tensor, name=opt.name)
def smacof_mds(C, dim, max_iter=3000, eps=1e-9):
    """Returns an interpolated point cloud following the dissimilarity matrix C
    using SMACOF multidimensional scaling (MDS) in a specific dimensioned
    target space.

    Parameters
    ----------
    C : ndarray, shape (ns, ns)
        dissimilarity matrix
    dim : int
        dimension of the targeted space
    max_iter : int
        Maximum number of iterations of the SMACOF algorithm for a single run
    eps : float
        relative tolerance w.r.t stress to declare convergence

    Returns
    -------
    npos : ndarray, shape (R, dim)
        Embedded coordinates of the interpolated point cloud (defined with
        one isometry)
    """
    rng = np.random.RandomState(seed=3)
    # First pass from a random init to get a starting embedding.
    # Fixed: `eps` parameter is now actually forwarded (was hard-coded 1e-9).
    mds = manifold.MDS(dim, max_iter=max_iter, eps=eps, dissimilarity='precomputed', n_init=1)
    pos = mds.fit(C).embedding_
    # Second (non-metric) pass refined from the metric solution.
    # Fixed: target dimension is now `dim` — it was hard-coded to 2, so any
    # other requested dimension silently produced a 2-D embedding, breaking
    # the documented (R, dim) return shape.
    nmds = manifold.MDS(dim, max_iter=max_iter, eps=eps, dissimilarity="precomputed", random_state=rng, n_init=1)
    npos = nmds.fit_transform(C, init=pos)
    return npos
def coverage(self, container: Container, tests: Optional[Iterable[TestCase]] = None, *, instrument: bool = True) -> TestSuiteCoverage:
    """Computes line coverage information over a provided set of tests for
    the program inside a given container.

    When no tests are given, the full test suite of the bug associated
    with the container is used.
    """
    extractor = self.coverage_extractor(container)
    if tests is None:
        # Default to all tests registered for the container's bug.
        bug = self.__installation.bugs[container.bug]
        tests = bug.tests
    return extractor.run(tests, instrument=instrument)
def _set_offset_base1(self, v, load=False):
    """Setter method for offset_base1, mapped from YANG variable
    /uda_key/profile/uda_profile_offsets/offset_base1 (uda-offset-base-type)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_offset_base1 is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_offset_base1() directly.

    NOTE: pyangbind-generated code — do not hand-edit the YANG metadata.
    """
    # Values carrying a converter are normalized to their native type first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the value in the generated YANG type; rejects anything outside
        # the enumerated offset-base keywords.
        t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'first-header': {'value': 1}, u'packet-start': {'value': 0}, u'fourth-header': {'value': 4}, u'second-header': {'value': 2}, u'third-header': {'value': 3}},), is_leaf=True, yang_name="offset-base1", rest_name="offset-base1", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None, u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-uda-access-list', defining_module='brocade-uda-access-list', yang_type='uda-offset-base-type', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """offset_base1 must be of a type compatible with uda-offset-base-type""", 'defined-type': "brocade-uda-access-list:uda-offset-base-type", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'first-header': {'value': 1}, u'packet-start': {'value': 0}, u'fourth-header': {'value': 4}, u'second-header': {'value': 2}, u'third-header': {'value': 3}},), is_leaf=True, yang_name="offset-base1", rest_name="offset-base1", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None, u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-uda-access-list', defining_module='brocade-uda-access-list', yang_type='uda-offset-base-type', is_config=True)""",})
    self.__offset_base1 = t
    # Propagate the change if the generated class defines a _set hook.
    if hasattr(self, '_set'):
        self._set()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.