signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def set_deplyment_vcenter_params(vcenter_resource_model, deploy_params):
    """Fill in any deployment attributes not already set, using the vCenter
    resource model's values as defaults, then validate the required ones.

    :param deploy_params: vCenterVMFromTemplateResourceModel or
        vCenterVMFromImageResourceModel
    :type vcenter_resource_model: VMwarevCenterResourceModel
    """
    # Fall back to the vCenter resource defaults for any unset attribute.
    for attr in ('vm_cluster', 'vm_storage', 'vm_resource_pool', 'vm_location'):
        if not getattr(deploy_params, attr):
            setattr(deploy_params, attr, getattr(vcenter_resource_model, attr))
    # The datacenter always comes from the vCenter resource.
    deploy_params.default_datacenter = vcenter_resource_model.default_datacenter

    # Validate that every required attribute ended up populated.
    if not deploy_params.vm_cluster:
        raise ValueError('VM Cluster is empty')
    if not deploy_params.vm_storage:
        raise ValueError('VM Storage is empty')
    if not deploy_params.vm_location:
        raise ValueError('VM Location is empty')
    if not deploy_params.default_datacenter:
        raise ValueError('Default Datacenter attribute on VMWare vCenter is empty')

    # Prefix the VM location with the datacenter path.
    deploy_params.vm_location = VMLocation.combine(
        [deploy_params.default_datacenter, deploy_params.vm_location])
|
def from_indra_statements(stmts, name: Optional[str] = None, version: Optional[str] = None, description: Optional[str] = None, authors: Optional[str] = None, contact: Optional[str] = None, license: Optional[str] = None, copyright: Optional[str] = None, disclaimer: Optional[str] = None):
    """Import a model from :mod:`indra`.

    :param List[indra.statements.Statement] stmts: A list of statements
    :param name: The graph's name
    :param version: The graph's version. Recommended to use `semantic
        versioning <http://semver.org/>`_ or ``YYYYMMDD`` format.
    :param description: The description of the graph
    :param authors: The authors of this graph
    :param contact: The contact email for this graph
    :param license: The license for this graph
    :param copyright: The copyright for this graph
    :param disclaimer: The disclaimer for this graph
    :rtype: pybel.BELGraph
    """
    # Imported lazily so indra is only required when this entry point is used.
    from indra.assemblers.pybel import PybelAssembler

    assembler = PybelAssembler(
        stmts=stmts,
        name=name,
        version=version,
        description=description,
        authors=authors,
        contact=contact,
        license=license,
        copyright=copyright,
        disclaimer=disclaimer,
    )
    return assembler.make_model()
|
def zoom_in(self, incr=1.0):
    """Zoom in by *incr* zoom levels.

    Also see :meth:`zoom_to`.

    Parameters
    ----------
    incr : float (optional, defaults to 1)
        The value to increase the zoom level
    """
    # Derive the current level from the current scale, then step up from it.
    current_level = self.zoom.calc_level(self.t_['scale'])
    self.zoom_to(current_level + incr)
|
def on_ctcp(self, connection, event):
    """Default handler for ctcp events.

    Replies to VERSION and PING requests and relays DCC CHAT requests
    to the on_dccchat method.
    """
    nick = event.source.nick
    command = event.arguments[0]
    if command == "VERSION":
        connection.ctcp_reply(nick, "VERSION " + self.get_version())
    elif command == "PING" and len(event.arguments) > 1:
        # Echo the ping payload back to the sender.
        connection.ctcp_reply(nick, "PING " + event.arguments[1])
    elif command == "DCC" and event.arguments[1].split(" ", 1)[0] == "CHAT":
        self.on_dccchat(connection, event)
|
def _isstring ( dtype ) :
"""Given a numpy dtype , determines whether it is a string . Returns True
if the dtype is string or unicode ."""
|
return dtype . type == numpy . unicode_ or dtype . type == numpy . string_
|
def QA_util_date_int2str(int_date):
    """Convert an 8-digit integer date (e.g. 20190101) to 'YYYY-MM-DD'.

    :param int_date: date as an 8-digit int, or an already-formatted
        10-character string
    :return: date string of the form 'YYYY-MM-DD' (None for other lengths)
    """
    text = str(int_date)
    if len(text) == 8:
        return '-'.join((text[:4], text[4:6], text[6:8]))
    if len(text) == 10:
        # Already in 'YYYY-MM-DD' form; pass through unchanged.
        return text
|
def summary(x, rm_nan=False, debug=False):
    """Compute basic statistical parameters.

    Parameters
    ----------
    x : 1d numpy array or list of floats
        Input array with values whose statistical properties are requested.
    rm_nan : bool
        If True, filter out NaN values before computing statistics.
    debug : bool
        If True, print the computed values.

    Returns
    -------
    result : dict
        Number of points, minimum, percentile 25, percentile 50 (median),
        mean, percentile 75, maximum, standard deviation, robust standard
        deviation, percentile 15.866 (equivalent to -1 sigma in a normal
        distribution) and percentile 84.134 (+1 sigma).  All statistics are
        0 when the (filtered) input is empty.

    Raises
    ------
    ValueError
        If x is neither an ndarray nor a list, or is not 1-dimensional.
    """
    # protections: accept ndarray (copied, so the caller's data is untouched)
    # or list; reject anything else
    if isinstance(x, np.ndarray):
        xx = np.copy(x)
    elif isinstance(x, list):
        xx = np.array(x)
    else:
        raise ValueError('x=' + str(x) + ' must be a numpy.ndarray')
    # BUG FIX: the original used "xx.ndim is not 1", which is an identity
    # (not value) comparison on an int and only works by CPython accident.
    if xx.ndim != 1:
        raise ValueError('xx.dim=' + str(xx.ndim) + ' must be 1')
    # filter out NaN's
    if rm_nan:
        xx = xx[np.logical_not(np.isnan(xx))]
    # compute basic statistics; guard every statistic against empty input
    npoints = len(xx)
    ok = npoints > 0
    result = {
        'npoints': npoints,
        'minimum': np.min(xx) if ok else 0,
        'percentile25': np.percentile(xx, 25) if ok else 0,
        'median': np.percentile(xx, 50) if ok else 0,
        'mean': np.mean(xx) if ok else 0,
        'percentile75': np.percentile(xx, 75) if ok else 0,
        'maximum': np.max(xx) if ok else 0,
        'std': np.std(xx) if ok else 0,
        'robust_std': robust_std(xx) if ok else 0,
        'percentile15': np.percentile(xx, 15.86553) if ok else 0,
        'percentile84': np.percentile(xx, 84.13447) if ok else 0,
    }
    if debug:
        print('>>> ========================================')
        print('>>> STATISTICAL SUMMARY:')
        print('>>> ----------------------------------------')
        print('>>> Number of points.........:', result['npoints'])
        print('>>> Minimum..................:', result['minimum'])
        print('>>> 1st Quartile.............:', result['percentile25'])
        print('>>> Median...................:', result['median'])
        print('>>> Mean.....................:', result['mean'])
        print('>>> 3rd Quartile.............:', result['percentile75'])
        print('>>> Maximum..................:', result['maximum'])
        print('>>> ----------------------------------------')
        print('>>> Standard deviation.......:', result['std'])
        print('>>> Robust standard deviation:', result['robust_std'])
        print('>>> 0.1586553 percentile.....:', result['percentile15'])
        print('>>> 0.8413447 percentile.....:', result['percentile84'])
        print('>>> ========================================')
    return result
|
def drawBackground(self, painter, opt, rect, brush):
    """Make sure the background extends to 0 for the first item.

    :param painter | <QtGui.QPainter>
           rect | <QtCore.QRect>
           brush | <QtGui.QBrush>
    """
    # Nothing to paint when no brush is supplied (callers may pass None).
    if not brush:
        return
    # Fill the full row rect with the brush, without drawing an outline.
    painter.setPen(QtCore.Qt.NoPen)
    painter.setBrush(brush)
    painter.drawRect(rect)
|
def _ReadStructureDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False):
  """Reads a structure data type definition.

  Args:
    definitions_registry (DataTypeDefinitionsRegistry): data type definitions
        registry.
    definition_values (dict[str, object]): definition values.
    definition_name (str): name of the definition.
    is_member (Optional[bool]): True if the data type definition is a member
        data type definition.

  Returns:
    StructureDefinition: structure data type definition.

  Raises:
    DefinitionReaderError: if the definitions values are missing or if
        the format is incorrect.
  """
  # Structures cannot be defined inline as members of another data type.
  if is_member:
    error_message = 'data type not supported as member'
    raise errors.DefinitionReaderError(definition_name, error_message)

  # Delegate to the generic members-based reader; structures support
  # conditional members (supports_conditions=True).
  return self._ReadDataTypeDefinitionWithMembers(
      definitions_registry, definition_values, data_types.StructureDefinition,
      definition_name, supports_conditions=True)
|
def calc_hazard_curves(groups, ss_filter, imtls, gsim_by_trt,
                       truncation_level=None, apply=sequential_apply,
                       filter_distance='rjb', reqv=None):
    """Compute hazard curves on a list of sites, given a set of seismic source
    groups and a dictionary of ground shaking intensity models (one per
    tectonic region type).

    Probability of ground motion exceedance is computed in different ways
    depending if the sources are independent or mutually exclusive.

    :param groups:
        A sequence of groups of seismic sources objects (instances of
        :class:`~openquake.hazardlib.source.base.BaseSeismicSource`).
    :param ss_filter:
        A source filter over the site collection or the site collection itself
    :param imtls:
        Dictionary mapping intensity measure type strings
        to lists of intensity measure levels.
    :param gsim_by_trt:
        Dictionary mapping tectonic region types (members
        of :class:`openquake.hazardlib.const.TRT`) to
        :class:`~openquake.hazardlib.gsim.base.GMPE` or
        :class:`~openquake.hazardlib.gsim.base.IPE` objects.
    :param truncation_level:
        Float, number of standard deviations for truncation of the intensity
        distribution.
    :param apply:
        apply function to use (default sequential_apply)
    :param filter_distance:
        The distance used to filter the ruptures (default rjb)
    :param reqv:
        If not None, an instance of RjbEquivalent
    :returns:
        An array of size N, where N is the number of sites, which elements
        are records with fields given by the intensity measure types; the
        size of each field is given by the number of levels in ``imtls``.
    """
    # This is ensuring backward compatibility i.e. processing a list of
    # sources
    if not isinstance(groups[0], SourceGroup):  # sent a list of sources
        odic = groupby(groups, operator.attrgetter('tectonic_region_type'))
        groups = [SourceGroup(trt, odic[trt], 'src_group', 'indep', 'indep')
                  for trt in odic]
    # ensure the sources have the right src_group_id
    for i, grp in enumerate(groups):
        for src in grp:
            if src.src_group_id is None:
                src.src_group_id = i
    imtls = DictArray(imtls)
    # NOTE(review): `grp` here is the variable leaked from the loop above, so
    # `cluster` is taken from the *last* group only -- confirm that all
    # groups share the same cluster flag, otherwise this is a latent bug.
    param = dict(imtls=imtls, truncation_level=truncation_level,
                 filter_distance=filter_distance, reqv=reqv,
                 cluster=grp.cluster)
    pmap = ProbabilityMap(len(imtls.array), 1)
    # Processing groups with homogeneous tectonic region
    # NOTE(review): only the GSIM of the first group's TRT is used for every
    # group -- presumably callers pass groups of a single TRT; verify.
    gsim = gsim_by_trt[groups[0][0].tectonic_region_type]
    mon = Monitor()
    for group in groups:
        if group.atomic:  # do not split
            it = [classical(group, ss_filter, [gsim], param, mon)]
        else:  # split the group and apply `classical` in parallel
            it = apply(classical,
                       (group.sources, ss_filter, [gsim], param, mon),
                       weight=operator.attrgetter('weight'))
        # Merge the partial probability maps into the accumulator.
        for dic in it:
            for grp_id, pval in dic['pmap'].items():
                pmap |= pval
    # `ss_filter` may be a filter (with a .sitecol) or the site collection.
    sitecol = getattr(ss_filter, 'sitecol', ss_filter)
    return pmap.convert(imtls, len(sitecol.complete))
|
def _scan_file(filename, sentinel, source_type='import'):
    '''Generator that performs the actual scanning of files.

    Yields a tuple containing import type, import path, and an extra file
    that should be scanned.  Extra file scans should be the file or directory
    that relates to the import name.
    '''
    filename = os.path.abspath(filename)
    real_filename = os.path.realpath(filename)

    # Guard clauses: skip oversized files and files already scanned.
    if os.path.getsize(filename) > max_file_size:
        log.warn('File size too large: %s', filename)
        return
    if real_filename in sentinel or not os.path.isfile(filename):
        return

    sentinel.add(real_filename)
    basename = os.path.basename(filename)
    scope, imports = ast_scan_file(filename)
    if scope is None or imports is None:
        log.warn('Could not scan imports from: %s', filename)
        return

    for imp in imports:
        yield (source_type, imp.module, None)

    # Django settings files pull in additional scan targets.
    if 'INSTALLED_APPS' in scope and basename == 'settings.py':
        log.info('Found Django settings: %s', filename)
        for item in django.handle_django_settings(filename):
            yield item
|
def DeletePendingNotification(self, timestamp):
    """Deletes the pending notification with the given timestamp.

    Args:
      timestamp: The timestamp of the notification. Assumed to be unique.

    Raises:
      UniqueKeyError: Raised if multiple notifications have the timestamp.
    """
    shown = self.Get(self.Schema.SHOWN_NOTIFICATIONS)
    if not shown:
        shown = self.Schema.SHOWN_NOTIFICATIONS()

    pending = self.Get(self.Schema.PENDING_NOTIFICATIONS)
    if not pending:
        return

    # Move every notification matching the timestamp from pending to shown.
    # Iterate in reverse so Pop() does not disturb the remaining indices.
    delete_count = 0
    for idx in reversed(range(len(pending))):
        if pending[idx].timestamp == timestamp:
            shown.Append(pending[idx])
            pending.Pop(idx)
            delete_count += 1
    if delete_count > 1:
        raise UniqueKeyError("Multiple notifications at %s" % timestamp)

    self.Set(self.Schema.PENDING_NOTIFICATIONS, pending)
    self.Set(self.Schema.SHOWN_NOTIFICATIONS, shown)
|
def upsert_document_acl_trigger(plpy, td):
    """Trigger for filling in acls when legacy publishes.

    A compatibility trigger to upsert authorization control entries (ACEs)
    for legacy publications.

    :param plpy: the PL/Python database access module
    :param td: the trigger data dict; td['new'] holds the incoming row
    :returns: "OK" for non-legacy publications (row untouched), else None
    """
    modified_state = "OK"
    uuid_ = td['new']['uuid']
    # `x and x or []` was the legacy spelling of `x or []`.
    authors = td['new']['authors'] or []
    maintainers = td['new']['maintainers'] or []
    # Only rows coming from the legacy system carry a version.
    is_legacy_publication = td['new']['version'] is not None
    if not is_legacy_publication:
        return modified_state

    # Upsert all authors and maintainers into the ACL
    # to give them publish permission.
    permissibles = set((uid, 'publish') for uid in authors + maintainers)

    plan = plpy.prepare("""\
SELECT user_id, permission FROM document_acl WHERE uuid = $1""", ['uuid'])
    existing_permissibles = set(
        (r['user_id'], r['permission']) for r in plpy.execute(plan, (uuid_,)))

    # Only insert ACEs that are not already present.
    new_permissibles = permissibles.difference(existing_permissibles)
    # Prepare once: the plan is loop-invariant (was re-prepared per row).
    insert_plan = plpy.prepare("""\
INSERT INTO document_acl (uuid, user_id, permission)
VALUES ($1, $2, $3)""", ['uuid', 'text', 'permission_type'])
    for uid, permission in new_permissibles:
        plpy.execute(insert_plan, (uuid_, uid, permission))
|
def get_gan_loss(self, true_frames, gen_frames, name):
  """Get the discriminator + generator loss at every step.

  This performs a 1:1 update of the discriminator and generator at every
  step.

  Args:
    true_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
      Assumed to be ground truth.
    gen_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
      Assumed to be fake.
    name: discriminator scope.

  Returns:
    loss: 0-D Tensor, with d_loss + g_loss
  """
  # D - STEP: create/reuse the discriminator variables and compute the
  # discriminator loss plus logits for the gradient-stopped fake frames.
  with tf.variable_scope("%s_discriminator" % name, reuse=tf.AUTO_REUSE):
    gan_d_loss, _, fake_logits_stop = self.d_step(true_frames, gen_frames)
  # G - STEP: reuse the *same* discriminator variables (reuse=True) so the
  # generator is scored by the freshly-updated discriminator.
  with tf.variable_scope("%s_discriminator" % name, reuse=True):
    gan_g_loss_pos_d, gan_g_loss_neg_d = self.g_step(
        gen_frames, fake_logits_stop)
  gan_g_loss = gan_g_loss_pos_d + gan_g_loss_neg_d
  tf.summary.scalar("gan_loss_%s" % name, gan_g_loss_pos_d + gan_d_loss)
  if self.hparams.gan_optimization == "joint":
    # Optimize the discriminator and generator jointly in one loss.
    gan_loss = gan_g_loss + gan_d_loss
  else:
    # Alternate per step: generator loss on odd steps, discriminator on even.
    curr_step = self.get_iteration_num()
    gan_loss = tf.cond(
        tf.logical_not(curr_step % 2 == 0),
        lambda: gan_g_loss, lambda: gan_d_loss)
  return gan_loss
|
def to_string(self, indentLevel=1, title=True, tags=None, projects=None, tasks=None, notes=None):
    """*convert this taskpaper object to a string*

    **Key Arguments:**
        - ``indentLevel`` -- the level of the indent for this object. Default *1*.
        - ``title`` -- print the title of the taskpaper object alongside the contents. Default *True*
        - ``tags`` -- replace tags with these tags. Default *None*
        - ``projects`` -- replace projects with these projects, pass empty list to delete all projects. Default *None*
        - ``tasks`` -- replace tasks with these ones, pass empty list to delete all tasks. Default *None*
        - ``notes`` -- replace notes with these ones, pass empty list to delete all notes. Default *None*

    **Return:**
        - ``objectString`` -- the taskpaper object as a string

    **Usage:**
        If we have the *archive* project from a taskpaper document, we can
        convert it to a string using ``archiveProject.to_string()``.
    """
    indent = indentLevel * "\t"
    objectString = ""
    if title:
        # NOTE(review): the bare excepts below deliberately skip objects
        # that lack the attribute (e.g. document objects have no title),
        # but they also hide genuine errors -- consider narrowing each to
        # AttributeError.
        try:  # NONE DOCUMENT OBJECTS
            objectString += self.title
        except:
            pass
        try:
            # Append the (possibly replaced) tags onto the title line.
            if tags:
                tagString = (" @").join(tags)
            else:
                tagString = (" @").join(self.tags)
            if len(tagString):
                objectString += " @" + tagString
        except:
            pass
    # NOTE(review): `if not notes/tasks/projects` treats an explicitly
    # passed empty list the same as None, which appears to contradict the
    # documented "pass empty list to delete all" behaviour -- confirm the
    # intent (an `is None` test would honour the docs) before changing.
    try:
        if not notes:
            notes = self.notes
        for n in notes:
            if len(n.title.strip()):
                # The very first line of a root object carries no newline.
                if not self.parent and len(objectString) == 0:
                    objectString += indent + n.title.strip() + n.content
                else:
                    objectString += "\n" + indent + n.title.strip() + n.content
    except:
        pass
    try:
        if not tasks:
            tasks = self.tasks
        for t in tasks:
            # Children render themselves one indent level deeper.
            objectString += "\n" + indent + t.to_string(indentLevel + 1)
    except:
        pass
    try:
        if not projects:
            projects = self.projects
        for p in projects:
            objectString += "\n" + indent + p.to_string(indentLevel + 1)
    except:
        pass
    try:
        # Only document objects carry `searches`.
        objectString += "\n" + indent + self.searches
    except:
        pass
    return objectString.strip()
|
def json_to_numpy(string_like, dtype=None):  # type: (str) -> np.array
    """Convert a JSON object to a numpy array.

    Args:
        string_like (str): JSON string.
        dtype (dtype, optional): Data type of the resulting array. If None,
            the dtypes will be determined by the contents of each column,
            individually. This argument can only be used to 'upcast' the
            array. For downcasting, use the .astype(t) method.

    Returns:
        (np.array): numpy array
    """
    return np.array(json.loads(string_like), dtype=dtype)
|
def set_position(self, val):
    """Set the device OPEN LEVEL."""
    if val == 0:
        # Zero means fully closed.
        self.close()
        return
    # Map the requested value to a 0-255 ON level:
    # fractions below 1 are scaled, values above 0xff are clamped to 255.
    if val < 1:
        setlevel = val * 100
    elif val <= 0xff:
        setlevel = val
    else:
        setlevel = 255
    set_command = StandardSend(
        self._address, COMMAND_LIGHT_ON_0X11_NONE, cmd2=setlevel)
    self._send_method(set_command, self._open_message_received)
|
def filepath_to_uri(path):
    """Convert a file system path to a URI portion that is suitable for
    inclusion in a URL.

    We are assuming input is either UTF-8 or unicode already.

    This method will encode certain chars that would normally be recognized
    as special chars for URIs.  Note that this method does not encode the '
    character, as it is a valid character within URIs.  See
    encodeURIComponent() JavaScript function for more details.

    Returns an ASCII string containing the encoded result.
    """
    if path is None:
        return path
    # BUG FIX: `urllib.quote` is Python-2-only; Python 3 moved it to
    # `urllib.parse.quote` (imported locally to keep the change contained).
    from urllib.parse import quote
    # I know about `os.sep` and `os.altsep` but I want to leave
    # some flexibility for hardcoding separators.
    return quote(path.replace("\\", "/"), safe=b"/~!*()'")
|
def final(arg):
    """Mark a class or method as *final*.

    Final classes are those that end the inheritance chain, i.e. forbid
    further subclassing.  A final class can thus be only instantiated,
    not inherited from.

    Similarly, methods marked as final in a superclass cannot be overridden
    in any of the subclasses.

    .. note::

        Final method itself can also be (in fact, it usually is) an
        overridden method from a superclass.  In those cases, it's
        recommended to place the :func:`final` modifier before
        :func:`override` for clarity::

            class Foo(Base):
                @final
                @override
                def florb(self):
                    super(Foo, self).florb()

    .. versionadded:: 0.0.3
       Now applicable to methods in addition to classes
    """
    if inspect.isclass(arg):
        # Finality on classes is enforced by the metaclass, so only classes
        # built with ObjectMetaclass may be marked.
        if not isinstance(arg, ObjectMetaclass):
            raise ValueError("@final can only be applied to a class "
                             "that is a subclass of Object")
    elif not is_method(arg):
        raise TypeError("@final can only be applied to classes or methods")

    # For decorated methods, flag the underlying function; plain classes
    # and functions are flagged directly.  The original argument is returned
    # so @final works as a decorator.
    method = arg.method if isinstance(arg, _WrappedMethod) else arg
    method.__final__ = True
    return arg
|
def show_wbridges(self):
    """Visualizes water bridges."""
    group = self.getPseudoBondGroup(
        "Water Bridges-%i" % self.tid, associateWith=[self.model])
    group.lineWidth = 3
    for i, wbridge in enumerate(self.plcomplex.waterbridges):
        # Bond from the bridging water to the acceptor atom.
        acc_bond = group.newPseudoBond(
            self.atoms[wbridge.water_id], self.atoms[wbridge.acc_id])
        acc_bond.color = self.colorbyname('cornflower blue')
        self.water_ids.append(wbridge.water_id)
        # Bond from the donor atom to the bridging water.
        don_bond = group.newPseudoBond(
            self.atoms[wbridge.don_id], self.atoms[wbridge.water_id])
        don_bond.color = self.colorbyname('cornflower blue')
        self.water_ids.append(wbridge.water_id)
        # Record whichever end sits on the protein as a binding-site residue.
        if wbridge.protisdon:
            self.bs_res_ids.append(wbridge.don_id)
        else:
            self.bs_res_ids.append(wbridge.acc_id)
|
def _filter(self, blacklist=None, newest_only=False, type_filter=None, **kwargs):
    """Filter the contained plugins, optionally against a blacklist.

    Args:
        blacklist (tuple): Iterable of BlacklistEntry objects
        newest_only (bool): Only the newest version of each plugin is returned
        type_filter (str): Plugin type to retrieve
        name (str): Plugin name to retrieve (via **kwargs)
        version (str): Plugin version to retrieve (via **kwargs)

    Returns nested dictionary of plugins.
    If a blacklist is supplied, plugins are evaluated against the blacklist
    entries.
    """
    plugins = DictWithDotNotation()
    # The key attribute for this container level (e.g. name or version).
    filtered_name = kwargs.get(self._key_attr, None)
    for key, val in self._items(type_filter, filtered_name):
        plugin_blacklist = None
        skip = False
        if blacklist:  # Assume blacklist is correct format since it is checked by PluginLoade
            plugin_blacklist = []
            for entry in blacklist:
                # Entries addressed at a different key don't apply here.
                if getattr(entry, self._key_attr) not in (key, None):
                    continue
                # An entry with no more-specific attributes blacklists
                # everything under this key outright.
                if all(getattr(entry, attr) is None for attr in self._bl_skip_attrs):
                    if not self._skip_empty:
                        plugins[key] = None if filtered_name else self._bl_empty()
                    skip = True
                    break
                # Otherwise pass the entry down to the child level.
                plugin_blacklist.append(entry)
        if not skip:  # pylint: disable=protected-access
            # Recurse into the child container with the narrowed blacklist.
            result = val._filter(plugin_blacklist, newest_only=newest_only, **kwargs)
            if result or not self._skip_empty:
                plugins[key] = result
    if filtered_name:
        # A specific name was requested: unwrap it from the dict.
        return plugins.get(filtered_name, None)
    return plugins
|
def request(cls, method, url, **kwargs):
    """Make a http call to a remote API and return a json response.

    Returns a ``(body, headers)`` tuple where body is the decoded JSON, or
    the raw text when the response is not JSON.

    Raises:
        APICallFailed: when the service is unreachable or returns an error.
    """
    user_agent = 'gandi.cli/%s' % __version__
    headers = {'User-Agent': user_agent,
               'Content-Type': 'application/json; charset=utf-8'}
    # Merge any caller-supplied headers over the defaults.
    if kwargs.get('headers'):
        headers.update(kwargs.pop('headers'))
    try:
        response = requests.request(method, url, headers=headers, **kwargs)
        response.raise_for_status()
        try:
            return response.json(), response.headers
        except ValueError as err:
            # Body was not JSON; fall back to the raw text.
            return response.text, response.headers
    except (socket.error, requests.exceptions.ConnectionError):
        msg = 'Remote API service is unreachable'
        raise APICallFailed(msg)
    except Exception as err:
        if isinstance(err, requests.HTTPError):
            # Try to extract a structured error message from the body.
            try:
                resp = response.json()
            except Exception:
                msg = 'An unknown error has occurred: %s' % err
                raise APICallFailed(msg)
            # NOTE(review): if the error body parses as JSON but carries no
            # 'message' key, `msg` is never assigned and the raise below
            # would fail with UnboundLocalError -- confirm upstream always
            # sends 'message'.
            if resp.get('message'):
                error = resp.get('message')
                if resp.get('errors'):
                    error = cls.format_errors(resp.get('errors'))
                msg = '%s: %s' % (err, error)
        else:
            msg = 'An unknown error has occurred: %s' % err
        raise APICallFailed(msg)
|
def write_line(self, message):
    """Write *message* plus a trailing newline to the output stream and
    flush immediately (unbuffered printing to stdout)."""
    out = self.out
    out.write(message + "\n")
    out.flush()
|
def execute(self, env, args):
    """Prints task information.

    `env`
       Runtime ``Environment`` instance.
    `args`
       Arguments object from arg parser.
    """
    # Parse the user-supplied period; a falsy result means it was invalid.
    start = self._fuzzy_time_parse(args.start)
    if not start:
        raise errors.FocusError(u'Invalid start period provided')
    stats = self._get_stats(env.task, start)
    self._print_stats(env, stats)
|
def refill(self, sess):
    """Clears the current queue and then refills it with new data."""
    # Drop everything currently queued.
    sess.run(self._clear_queue)
    # Run until full: the fill op returns falsy once the queue is full.
    while sess.run(self._fill_queue):
        pass
|
def _finalize(self, dtype=np.uint8):
    """Finalize the image, that is put it in RGB mode, and set the channels
    in unsigned 8bit format ([0,255] range) (if the *dtype* doesn't say
    otherwise).

    Returns a ``(channels, fill_value)`` tuple of masked channel arrays in
    *dtype* and the scaled fill value (or None).
    """
    channels = []
    # Palette modes are expanded to full RGB(A) first.
    if self.mode == "P":
        self.convert("RGB")
    if self.mode == "PA":
        self.convert("RGBA")
    for chn in self.channels:
        # Channels are assumed to hold values in [0, 1]; scale them to the
        # full integer range of the target dtype.  For masked arrays, work
        # on the raw .data so masked entries don't disturb the clip.
        if isinstance(chn, np.ma.core.MaskedArray):
            final_data = chn.data.clip(0, 1) * np.iinfo(dtype).max
        else:
            final_data = chn.clip(0, 1) * np.iinfo(dtype).max
        if np.issubdtype(dtype, np.integer):
            # Round rather than truncate when converting to integers.
            final_data = np.round(final_data)
        # Rebuild a masked array in the target dtype, preserving the mask.
        channels.append(np.ma.array(
            final_data, dtype, mask=np.ma.getmaskarray(chn)))
    if self.fill_value is not None:
        # Scale the per-channel fill value to the target dtype range too.
        fill_value = [int(col * np.iinfo(dtype).max) for col in self.fill_value]
    else:
        fill_value = None
    return channels, fill_value
|
def setup(self, app):
    """Setup the plugin and prepare application."""
    super(Plugin, self).setup(app)

    # The toolbar renders through Jinja2; refuse to start without it.
    if 'jinja2' not in app.plugins:
        raise PluginException('The plugin requires Muffin-Jinja2 plugin installed.')

    # Normalise the URL prefix to end with exactly one slash, and exclude
    # the toolbar's own URLs from being traced.
    self.cfg.prefix = self.cfg.prefix.rstrip('/') + '/'
    self.cfg.exclude.append(self.cfg.prefix)

    # Setup debugtoolbar templates
    app.ps.jinja2.cfg.template_folders.append(op.join(PLUGIN_ROOT, 'templates'))

    # Resolve panel specs: strings of the form "module:ClassName" are
    # imported and looked up; class objects pass through unchanged.
    self.cfg.panels += list(self.cfg.additional_panels)
    panels_ = []
    for panel in self.cfg.panels:
        if isinstance(panel, str):
            mod, _, panel = panel.partition(':')
            mod = importlib.import_module(mod)
            # NOTE(review): eval() on a configuration-supplied string --
            # safe only while panel specs come from trusted config; do not
            # feed user input here.
            panel = eval(panel or 'DebugPanel', mod.__dict__)
        panels_.append(panel)
    self.cfg.panels = panels_

    # Setup debugtoolbar static files
    app.router.register_route(StaticRoute(
        'debugtoolbar.static', self.cfg.prefix + 'static/',
        op.join(PLUGIN_ROOT, 'static')))

    # Register the toolbar's own endpoints under the prefix.
    app.register(self.cfg.prefix + 'sse', name='debugtoolbar.sse')(self.sse)
    app.register(self.cfg.prefix + 'exception', name='debugtoolbar.exception')(self.exception)
    app.register(self.cfg.prefix + 'execute', name='debugtoolbar.execute')(self.execute)
    app.register(self.cfg.prefix + 'source', name='debugtoolbar.source')(self.source)
    app.register(
        self.cfg.prefix.rstrip('/'), self.cfg.prefix,
        self.cfg.prefix + '{request_id}', name='debugtoolbar.request')(self.view)

    # Shared toolbar state: a secret token plus bounded histories of
    # requests, exceptions and stack frames.
    app['debugtoolbar'] = {}
    app['debugtoolbar']['pdbt_token'] = uuid.uuid4().hex
    self.history = app['debugtoolbar']['history'] = utils.History(50)
    self.exceptions = app['debugtoolbar']['exceptions'] = utils.History(50)
    self.frames = app['debugtoolbar']['frames'] = utils.History(100)
|
def is_reserved(self):
    """Test if the address is otherwise IETF reserved.

    Returns:
        A boolean, True if the address is within one of the
        reserved IPv6 Network ranges.
    """
    reserved_prefixes = (
        u'::/8', u'100::/8', u'200::/7', u'400::/6', u'800::/5',
        u'1000::/4', u'4000::/3', u'6000::/3', u'8000::/3', u'A000::/3',
        u'C000::/3', u'E000::/4', u'F000::/5', u'F800::/6', u'FE00::/9',
    )
    # Lazily build each network and stop at the first containing one.
    return any(self in IPv6Network(prefix) for prefix in reserved_prefixes)
|
def update_model_connector(self, model_id, connector):
    """Update the connector information for a given model.

    Returns None if the specified model does not exist.

    Parameters
    ----------
    model_id : string
        Unique model identifier
    connector : dict
        New connection information

    Returns
    -------
    ModelHandle
    """
    # Reject malformed connector information before touching the registry.
    self.validate_connector(connector)
    return self.registry.update_connector(model_id, connector)
|
def read(self, amount: int = -1) -> bytes:
    '''Read data.

    Generator-based coroutine; reads up to *amount* bytes (-1 reads until
    EOF) through the shared network-operation wrapper.
    '''
    # The connection must be in the freshly-created state before reading.
    assert self._state == ConnectionState.created, \
        'Expect conn created. Got {}.'.format(self._state)
    # Delegate to the wrapper, which applies the close timeout and labels
    # the operation 'Read' for error reporting.
    data = yield from self.run_network_operation(
        self.reader.read(amount), close_timeout=self._timeout, name='Read')
    return data
|
def diff(a, b):
    """Compares JSON objects.

    :param a:
    :param b:
    :return: difference object a vs b
    """
    # NOTE(review): this unconditionally calls diff(a, b) with the *same*
    # arguments -- as written it recurses forever.  It presumably was meant
    # to call an external diff helper that this function's name shadows;
    # confirm the intended callee before relying on this code.
    delta = diff(a, b)
    if not a:
        return delta
    # Post-process the raw delta: represent removed list items explicitly
    # under the DIFF_DELETE key, resolving removal indices back to the
    # original values from `a`.
    for item in delta:
        if isinstance(a[item], list):
            if delta[item] == []:
                # Whole list removed: record all original items as deleted.
                delta[item] = {JSONUtils.DIFF_DELETE: a[item]}
            elif isinstance(delta[item], dict):
                for key in delta[item].keys():
                    if str(key) == JSONUtils.DIFF_DELETE:
                        # Map deleted indices to their original values.
                        delta_upd = {JSONUtils.DIFF_DELETE: []}
                        for i in delta[item][key]:
                            delta_upd[JSONUtils.DIFF_DELETE].append(a[item][i])
                        delta[item] = delta_upd
    return delta
|
def getList(self):
    """Query the sensitive-word (wordfilter) list.

    @return code: response code, 200 means success.
    @return word: the sensitive word content.
    @return errorMessage: error message.
    """
    # Response schema description (field texts are part of the API payload
    # contract and are kept verbatim).
    desc = {
        "name": "ListWordfilterReslut",
        "desc": "listWordfilter返回结果",
        "fields": [
            {"name": "code", "type": "Integer", "desc": "返回码,200 为正常。"},
            {"name": "word", "type": "String", "desc": "敏感词内容。"},
            {"name": "errorMessage", "type": "String", "desc": "错误信息。"},
        ],
    }
    raw = self.call_api(
        method=('API', 'POST', 'application/x-www-form-urlencoded'),
        action='/wordfilter/list.json',
        params={})
    return Response(raw, desc)
|
def oneday_weather_forecast(
        location='Portland, OR',
        inputs=('Min Temperature', 'Mean Temperature', 'Max Temperature',
                'Max Humidity', 'Mean Humidity', 'Min Humidity',
                'Max Sea Level Pressure', 'Mean Sea Level Pressure',
                'Min Sea Level Pressure', 'Wind Direction'),
        outputs=('Min Temperature', 'Mean Temperature', 'Max Temperature',
                 'Max Humidity'),
        date=None, epochs=200, delays=(1, 2, 3, 4), num_years=4,
        use_cache=False, verbosity=1,
        ):
    """Provide a weather forecast for tomorrow based on historical weather
    at that location.

    Returns a dict with the trained ``trainer`` plus ``yesterday``,
    ``today`` and ``tomorrow`` entries mapping output names to predicted
    values (each tagged with its date).
    """
    date = make_date(date or datetime.datetime.now().date())
    num_years = int(num_years or 10)
    # Train on this many years of history up to (and including) date's year.
    years = range(date.year - num_years, date.year + 1)
    df = weather.daily(location, years=years, use_cache=use_cache,
                       verbosity=verbosity).sort()
    # because up-to-date weather history was cached above, can use that
    # cache, regardless of use_cache kwarg
    trainer, df = train_weather_predictor(
        location, years=years, delays=delays, inputs=inputs, outputs=outputs,
        epochs=epochs, verbosity=verbosity, use_cache=True,
        )
    nn = trainer.module
    forecast = {'trainer': trainer}
    # Re-activate the network on the last two training samples to report the
    # model's view of yesterday and today.
    yesterday = dict(zip(outputs, nn.activate(trainer.ds['input'][-2])))
    forecast['yesterday'] = update_dict(yesterday, {'date': df.index[-2].date()})
    today = dict(zip(outputs, nn.activate(trainer.ds['input'][-1])))
    forecast['today'] = update_dict(today, {'date': df.index[-1].date()})
    # Build one input sample from the most recent max(delays) days to
    # predict tomorrow.
    ds = util.input_dataset_from_dataframe(
        df[-max(delays):], delays=delays, inputs=inputs, normalize=False,
        verbosity=0)
    tomorrow = dict(zip(outputs, nn.activate(ds['input'][-1])))
    forecast['tomorrow'] = update_dict(
        tomorrow, {'date': (df.index[-1] + datetime.timedelta(1)).date()})
    return forecast
|
def _generate_storage_broker_lookup ( ) :
    """Build and return a mapping of broker key to storage-broker class.

    Discovers broker classes registered under the ``dtool.storage_brokers``
    entry-point group.
    """
    brokers = ( ep . load ( ) for ep in iter_entry_points ( "dtool.storage_brokers" ) )
    return { broker . key : broker for broker in brokers }
|
def render_query ( dataset , tables , select = None , conditions = None , groupings = None , having = None , order_by = None , limit = None ) :
    """Render a BigQuery SQL string over ``tables`` in ``dataset``.

    Parameters
    ----------
    dataset : str
        The BigQuery dataset to query data from.
    tables : Union[dict, list]
        The table(s) in ``dataset`` to query.
    select : dict, optional
        Maps column names to rendering options (e.g. ``alias``/``format``).
        Pass ``None`` to select all columns.
    conditions : list, optional
        Each dict carries 'field', 'type' and a 'comparators' dict with
        'condition', 'negate' and 'value' keys used to build WHERE clauses.
    groupings : list, optional
        Field names to group by.
    having : optional
        HAVING clause specification, same shape as ``conditions``.
    order_by : dict, optional
        ``{'field': ..., 'direction': ...}`` ordering specification.
    limit : int, optional
        Maximum number of rows to return.

    Returns
    -------
    str or None
        The rendered query, or ``None`` when dataset/tables are missing.
    """
    if None in ( dataset , tables ) :
        return None
    fragments = (
        _render_select ( select ) ,
        _render_sources ( dataset , tables ) ,
        _render_conditions ( conditions ) ,
        _render_groupings ( groupings ) ,
        _render_having ( having ) ,
        _render_order ( order_by ) ,
        _render_limit ( limit ) ,
    )
    return "%s %s %s %s %s %s %s" % fragments
|
def media_update ( self , id , description = None , focus = None ) :
    """Update the metadata of the media file with the given `id`.

    `description` and `focus` are as in `media_post()`_.

    :param id: media id (or dict carrying one); unpacked before use
    :param description: new alt-text description, or None to leave unchanged
    :param focus: (x, y) focal point tuple, or None to leave unchanged
    :return: the updated `media dict`_
    """
    id = self . __unpack_id ( id )
    # Idiom fix: compare against None with 'is not', not '!='.
    if focus is not None :
        focus = str ( focus [ 0 ] ) + "," + str ( focus [ 1 ] )
    # __generate_params harvests locals() by name, so the local variable
    # names above are part of the request payload — do not rename them.
    params = self . __generate_params ( locals ( ) , [ 'id' ] )
    return self . __api_request ( 'PUT' , '/api/v1/media/{0}' . format ( str ( id ) ) , params )
|
def StartTag ( self , name , attrs ) :
    '''A method which is called by the SAX parser when a new tag is encountered.

    :param name: name of the tag
    :param attrs: the tag's attributes
    :return: none, side effect of modifying bits of the current class
    '''
    # Ignore tags the parser was configured to exclude.
    if name not in self . excluded :
        # If this tag has a registered handler, make it the active one.
        if name in self . structure . keys ( ) :
            self . handler = self . structure [ name ]
        # Track the open-tag stack and remember any attributes by tag name.
        self . tags . append ( name )
        if attrs is not None :
            self . attribs [ name ] = attrs
        # NOTE(review): CheckDynamics is defined elsewhere; presumably flags
        # dynamics-related tags — confirm its contract.
        self . isDynamic = CheckDynamics ( name )
        # Dynamics tags are dispatched immediately while inside a <dynamics> scope.
        if self . isDynamic and "dynamics" in self . tags :
            self . handler ( self . tags , self . attribs , self . chars , self . piece , self . data )
        # Self-closing tags are dispatched on open since no EndTag content follows.
        if name in self . closed_tags and self . handler is not None :
            self . handler ( self . tags , self . attribs , self . chars , self . piece , self . data )
|
def load ( self ) :
    """Load variables and attributes together in a single call.

    Centralizing the load step lets subclasses layer automatic
    encoding/decoding on top, e.g.::

        class SuffixAppendingDataStore(AbstractDataStore):
            def load(self):
                variables, attributes = AbstractDataStore.load(self)
                variables = {'%s_suffix' % k: v
                             for k, v in variables.items()}
                attributes = {'%s_suffix' % k: v
                              for k, v in attributes.items()}
                return variables, attributes

    This is invoked whenever variables or attributes are requested,
    so it should stay fast.
    """
    decoded = [ ]
    for raw_name , variable in self . get_variables ( ) . items ( ) :
        decoded . append ( ( _decode_variable_name ( raw_name ) , variable ) )
    variables = FrozenOrderedDict ( decoded )
    attributes = FrozenOrderedDict ( self . get_attrs ( ) )
    return variables , attributes
|
def importdb ( indir ) :
    """Import a previously exported anchore DB.

    :param indir: export directory; must contain 'images', 'feeds' and
        'storedfiles' subdirectories.
    Exits the process with status 0 on success, 1 on any failure.
    """
    ecode = 0
    try :
        imgdir = os . path . join ( indir , "images" )
        feeddir = os . path . join ( indir , "feeds" )
        storedir = os . path . join ( indir , "storedfiles" )
        # All four directories must exist for the export to be usable.
        for d in [ indir , imgdir , feeddir , storedir ] :
            if not os . path . exists ( d ) :
                raise Exception ( "specified directory " + str ( indir ) + " does not appear to be complete (missing " + str ( d ) + ")" )
        anchore_print ( "importing images..." )
        # imagelist = []
        for ifile in os . listdir ( imgdir ) :
            # Each image export is a <imageId>.json file.
            patt = re . match ( "(.*)\.json" , ifile )
            if patt :
                imageId = patt . group ( 1 )
                if contexts [ 'anchore_db' ] . is_image_present ( imageId ) :
                    anchore_print ( "\timage (" + str ( imageId ) + ") already exists in DB, skipping import." )
                else : # imagelist.append(patt.group(1))
                    thefile = os . path . join ( imgdir , ifile )
                    with open ( thefile , 'r' ) as FH :
                        imagedata = json . loads ( FH . read ( ) )
                    try :
                        rc = contexts [ 'anchore_db' ] . save_image_new ( imageId , report = imagedata )
                        if not rc :
                            contexts [ 'anchore_db' ] . delete_image ( imageId )
                            raise Exception ( "save to anchore DB failed" )
                    except Exception as err :
                        # Roll back a partially saved image before propagating.
                        contexts [ 'anchore_db' ] . delete_image ( imageId )
                        raise err
                    # Restore any per-namespace stored-file tarballs for this image.
                    thedir = os . path . join ( storedir , imageId )
                    if os . path . exists ( thedir ) :
                        for namespace in os . listdir ( thedir ) :
                            thefile = os . path . join ( thedir , namespace , "stored_files.tar.gz" )
                            if os . path . exists ( thefile ) :
                                contexts [ 'anchore_db' ] . save_files_tarfile ( imageId , namespace , thefile )
                    anchore_print ( "\timage (" + str ( imageId ) + ") imported." )
        anchore_print ( "importing feeds..." )
        thefile = os . path . join ( feeddir , "feedmeta.json" )
        with open ( thefile , 'r' ) as FH :
            feedmeta = json . loads ( FH . read ( ) )
        if feedmeta :
            contexts [ 'anchore_db' ] . save_feedmeta ( feedmeta )
            for feed in feedmeta :
                feedobj = feedmeta [ feed ]
                for group in feedobj [ 'groups' ] :
                    groupobj = feedobj [ 'groups' ] [ group ]
                    # 'datafiles' is removed from the metadata; files are
                    # imported individually below.
                    datafiles = groupobj . pop ( 'datafiles' , [ ] )
                    for datafile in datafiles :
                        thedir = os . path . join ( feeddir , feed , group )
                        thefile = os . path . join ( thedir , datafile )
                        # Missing data files are silently skipped (best-effort import).
                        if not os . path . exists ( thefile ) :
                            pass
                        else :
                            with open ( thefile , 'r' ) as FH :
                                contexts [ 'anchore_db' ] . save_feed_group_data ( feed , group , datafile , json . loads ( FH . read ( ) ) )
                            anchore_print ( "\tfeed (" + feed + " " + group + " " + datafile + ") imported" )
        # TODO import stored files
    except Exception as err :
        anchore_print_err ( "operation failed: " + str ( err ) )
        ecode = 1
    sys . exit ( ecode )
|
def get_F_y ( fname = 'binzegger_connectivity_table.json' , y = [ 'p23' ] ) :
    '''Extract frequency of occurrences of those cell types that are modeled.

    The data set contains cell types that are not modeled (TCs etc.).
    The returned percentages are renormalized onto modeled cell-types,
    i.e. they sum up to 1.

    :param fname: path to the JSON connectivity table; expects the shape
        data['data'][cell_type]['occurrence']
    :param y: list of modeled cell-type names to extract (not mutated)
    :return: list of floats, the normalized occurrence fractions in the
        order of ``y``
    '''
    # Use a context manager so the file handle is closed even on error
    # (the original open/close pair leaked the handle on a parse failure).
    with open ( fname , 'r' ) as f :
        data = json . load ( f )
    occurr = [ data [ 'data' ] [ cell_type ] [ 'occurrence' ] for cell_type in y ]
    return list ( np . array ( occurr ) / np . sum ( occurr ) )
|
def iter_list ( self , id , * args , ** kwargs ) :
    """Iterate over every attachment of a device.

    Unlike ``list``, which returns a single page bounded by its ``limit``
    and ``page`` arguments, this yields all attachments by letting the
    service make successive ``list`` calls internally.

    :param id: Device ID as an int.
    :param args: Arguments that ``list`` takes.
    :param kwargs: Optional arguments that ``list`` takes.
    :return: :class:`attachments.Attachment <attachments.Attachment>` list
    """
    fetch_page = partial ( self . list , id )
    return self . service . iter_list ( fetch_page , * args , ** kwargs )
|
def get_trending_daily ( lang = "" ) :
    """Fetches repos in "Trending Daily" Github section.

    :param lang: Coding language filter; "" fetches all languages.
        Spaces are stripped and the name lowercased to match GitHub's URLs.
    :return: List of GithubUserRepository
    """
    url = "https://github.com/trending/"
    url += str ( lang ) . lower ( ) . replace ( " " , "" ) + "?since=daily"
    # Synchronous network fetch of the trending page HTML.
    api_content_request = urllib . request . Request ( url )
    api_content_response = urllib . request . urlopen ( api_content_request ) . read ( ) . decode ( "utf-8" )
    # parse response
    soup = BeautifulSoup ( api_content_response , "lxml" )
    # html parser
    # NOTE(review): this scrapes GitHub's markup (ol.repo-list and the
    # "owner / name" anchor text); it will break if GitHub changes the page
    # layout — confirm against the live page.
    raw_repo_list = soup . find ( "ol" , { "class" : "repo-list" } ) . find_all ( "li" )
    repos_list = [ ]
    for repo in raw_repo_list :
        # Anchor text has the form "owner / name"; split and trim both parts.
        details = repo . find_all ( "div" ) [ 0 ] . a . text . split ( "/" )
        repo_owner = details [ 0 ] . strip ( )
        repo_name = details [ 1 ] . strip ( )
        repos_list . append ( GithubUserRepository ( repo_owner , repo_name ) )
    return repos_list
|
def apply_range ( self , sampfrom = 0 , sampto = None ) :
    """Filter the annotation attributes to keep only items whose sample
    number lies within [sampfrom, sampto] (inclusive).

    :param sampfrom: lower sample bound (default 0)
    :param sampto: upper sample bound; None means the last annotation sample
    """
    # Bug fix: 'sampto or self.sample[-1]' treated an explicit sampto=0 as
    # "unset" and silently kept everything; compare against None instead.
    if sampto is None :
        sampto = self . sample [ - 1 ]
    # Boolean mask selects indices satisfying both bounds (equivalent to the
    # old intersect1d of two np.where results, but in a single pass).
    keep_mask = ( self . sample >= sampfrom ) & ( self . sample <= sampto )
    kept_inds = np . where ( keep_mask ) [ 0 ]
    for field in [ 'sample' , 'label_store' , 'subtype' , 'chan' , 'num' ] :
        setattr ( self , field , getattr ( self , field ) [ kept_inds ] )
    self . aux_note = [ self . aux_note [ i ] for i in kept_inds ]
    self . ann_len = len ( self . sample )
|
def demo_usage ( n_data = 50 , n_fit = 537 , nhead = 5 , ntail = 5 , plot = False , alt = 0 ) :
    """Plots a noisy sine curve and the fitting to it.

    Also presents the error and the error in the approximation of its
    first derivative (cosine curve).

    Usage example for benchmarking:
        $ time python sine.py --nhead 3 --ntail 3 --n-fit 500000 --n-data 50000
    Usage example for plotting:
        $ python sine.py --nhead 1 --ntail 1 --plot

    :param n_data: number of noisy sample points on [0, 5]
    :param n_fit: number of evaluation points for the fitted curve
    :param nhead: forward points used by interpolate_by_finite_diff
    :param ntail: backward points used by interpolate_by_finite_diff
    :param plot: show matplotlib figures when True
    :param alt: if nonzero, use per-point finite differences over a window
        of +/- alt data points instead of the vectorized interpolation
    """
    x0 , xend = 0 , 5
    # shaky linspace -5% to +5% noise
    x_data = ( np . linspace ( x0 , xend , n_data ) + np . random . rand ( n_data ) * ( xend - x0 ) / n_data / 1.5 )
    y_data = np . sin ( x_data ) * ( 1.0 + 0.1 * ( np . random . rand ( n_data ) - 0.5 ) )
    x_fit = np . linspace ( x0 , xend , n_fit )
    # Edges behave badly, work around: nudge the end points inward by half a step.
    x_fit [ 0 ] = x_fit [ 0 ] + ( x_fit [ 1 ] - x_fit [ 0 ] ) / 2
    x_fit [ - 1 ] = x_fit [ - 2 ] + ( x_fit [ - 1 ] - x_fit [ - 2 ] ) / 2
    if alt :
        y_fit = np . empty ( n_fit )
        dydx_fit = np . empty ( n_fit )
        for i , xf in enumerate ( x_fit ) : # get index j of first data point beyond xf
            j = np . where ( x_data > xf ) [ 0 ] [ 0 ]
            lower_bound = max ( 0 , j - alt )
            upper_bound = min ( n_data - 1 , j + alt )
            # Order 0 gives the interpolated value, order 1 its derivative.
            y_fit [ i ] = derivatives_at_point_by_finite_diff ( x_data [ lower_bound : upper_bound ] , y_data [ lower_bound : upper_bound ] , xf , 0 )
            dydx_fit [ i ] = derivatives_at_point_by_finite_diff ( x_data [ lower_bound : upper_bound ] , y_data [ lower_bound : upper_bound ] , xf , 1 ) [ 1 ]
    else :
        # Vectorized path: column 0 is the value, column 1 the first derivative.
        interp = interpolate_by_finite_diff ( x_data , y_data , x_fit , 1 , nhead , ntail )
        y_fit = interp [ : , 0 ]
        dydx_fit = interp [ : , 1 ]
    if plot :
        import matplotlib . pyplot as plt
        plt . subplot ( 221 )
        plt . plot ( x_data , y_data , 'x' , label = 'Data points (sin)' )
        plt . plot ( x_fit , y_fit , '-' , label = 'Fitted curve (order=0)' )
        plt . plot ( x_data , np . sin ( x_data ) , '-' , label = 'Analytic sin(x)' )
        plt . legend ( )
        plt . subplot ( 222 )
        plt . plot ( x_fit , y_fit - np . sin ( x_fit ) , label = 'Error in order=0' )
        plt . legend ( )
        plt . subplot ( 223 )
        plt . plot ( x_fit , dydx_fit , '-' , label = 'Fitted derivative (order=1)' )
        plt . plot ( x_data , np . cos ( x_data ) , '-' , label = 'Analytic cos(x)' )
        plt . legend ( )
        plt . subplot ( 224 )
        plt . plot ( x_fit , dydx_fit - np . cos ( x_fit ) , label = 'Error in order=1' )
        plt . legend ( )
        plt . show ( )
|
def rename_script ( rename = None ) : # noqa: E501
    """Rename a script.

    :param rename: The data needed to save this script; when the request
        carries JSON this parameter is replaced by the parsed body.
    :type rename: dict | bytes
    :rtype: Response
    """
    # Replace the raw parameter with a typed model parsed from the JSON body.
    if connexion . request . is_json :
        rename = Rename . from_dict ( connexion . request . get_json ( ) )
    # noqa: E501
    # Authorization gate: bounce unauthenticated callers.
    if ( not hasAccess ( ) ) :
        return redirectUnauthorized ( )
    driver = LoadedDrivers . getDefaultDriver ( )
    # The driver refuses to overwrite an existing file on rename.
    if ( not driver . renameScript ( rename . original . name , rename . new . name ) ) :
        return ErrorResponse ( status = 500 , message = 'Cannot rename to an existing file.' )
    return Response ( status = 200 , body = { 'file-name' : rename . new . name } )
|
def radial_symmetry ( mesh ) :
    """Check whether a mesh has rotational symmetry.

    :param mesh: a watertight mesh exposing principal_inertia_components
        and principal_inertia_vectors

    Returns
    -------
    symmetry : None or str
        None        No rotational symmetry
        'radial'    Symmetric around an axis
        'spherical' Symmetric around a point
    axis : None or (3,) float
        Rotation axis or point
    section : None or (3, 2) float
        If radial symmetry provide vectors to get cross section
    """
    # if not a volume this is meaningless
    if not mesh . is_volume :
        return None , None , None
    # the sorted order of the principal components of inertia (3,) float
    order = mesh . principal_inertia_components . argsort ( )
    # we are checking if a geometry has radial symmetry
    # if 2 of the PCI are equal, it is a revolved 2D profile
    # if 3 of the PCI (all of them) are equal it is a sphere
    # thus we take the diff of the sorted PCI, scale it as a ratio
    # of the largest PCI, and then scale to the tolerance we care about
    # if tol is 1e-3, that means that 2 components are identical if they
    # are within .1% of the maximum PCI.
    diff = np . abs ( np . diff ( mesh . principal_inertia_components [ order ] ) )
    diff /= np . abs ( mesh . principal_inertia_components ) . max ( )
    # diffs that are within tol of zero
    diff_zero = ( diff / 1e-3 ) . astype ( int ) == 0
    if diff_zero . all ( ) : # this is the case where all 3 PCI are identical
        # this means that the geometry is symmetric about a point
        # examples of this are a sphere, icosahedron, etc
        axis = mesh . principal_inertia_vectors [ 0 ]
        section = mesh . principal_inertia_vectors [ 1 : ]
        return 'spherical' , axis , section
    elif diff_zero . any ( ) : # this is the case for 2/3 PCI are identical
        # this means the geometry is symmetric about an axis
        # probably a revolved 2D profile
        # we know that only 1/2 of the diff values are True
        # if the first diff is 0, it means if we take the first element
        # in the ordered PCI we will have one of the non-revolve axis
        # if the second diff is 0, we take the last element of
        # the ordered PCI for the section axis
        # if we wanted the revolve axis we would just switch [0,-1] to
        # [-1,0]
        # since two vectors are the same, we know the middle
        # one is one of those two
        section_index = order [ np . array ( [ [ 0 , 1 ] , [ 1 , - 1 ] ] ) [ diff_zero ] ] . flatten ( )
        section = mesh . principal_inertia_vectors [ section_index ]
        # we know the rotation axis is the sole unique value
        # and is either first or last of the sorted values
        axis_index = order [ np . array ( [ - 1 , 0 ] ) [ diff_zero ] ] [ 0 ]
        axis = mesh . principal_inertia_vectors [ axis_index ]
        return 'radial' , axis , section
    return None , None , None
|
def log_warn ( message , args ) :
    """Emit *message* (with lazy %-style *args*) at WARNING level through
    the default logger."""
    default_logger = get_logger ( DEFAULT_LOGGER , log_creation = False )
    default_logger . log ( logging . WARNING , message , * args )
|
def bspline_to_nurbs ( obj ) :
    """Convert a non-rational parametric shape to its rational equivalent.

    :param obj: B-Spline shape
    :type obj: BSpline.Curve, BSpline.Surface or BSpline.Volume
    :return: NURBS shape
    :rtype: NURBS.Curve, NURBS.Surface or NURBS.Volume
    :raises TypeError: if *obj* is not a B-Spline curve, surface or volume
    """
    # Dispatch table preserves the curve -> surface -> volume check order.
    converters = (
        ( BSpline . Curve , _convert . convert_curve ) ,
        ( BSpline . Surface , _convert . convert_surface ) ,
        ( BSpline . Volume , _convert . convert_volume ) ,
    )
    for shape_type , convert in converters :
        if isinstance ( obj , shape_type ) :
            return convert ( obj , NURBS )
    raise TypeError ( "Input must be an instance of B-Spline curve, surface or volume" )
|
def kernel_integrity ( attrs = None , where = None ) :
    '''Return kernel_integrity information from osquery.

    CLI Example:

    .. code-block:: bash

        salt '*' osquery.kernel_integrity
    '''
    # Guard clause: the table only exists on Red Hat / Debian families.
    if __grains__ [ 'os_family' ] not in ( 'RedHat' , 'Debian' ) :
        return { 'result' : False , 'comment' : 'Only available on Red Hat or Debian based systems.' }
    return _osquery_cmd ( table = 'kernel_integrity' , attrs = attrs , where = where )
|
def generate_data ( dim = 40 , num_samples = 30000 , num_bins = 10 , sparse = False ) :
    """Generates data following appendix V of (Poirazi & Mel, 2001).

    Positive and negative examples are drawn from the same distribution, but
    are multiplied by different square matrices, one of them uniform on [-1, 1]
    and one the sum of a uniform matrix and a normal one. It is assumed that
    half the samples are negative in this case.

    Initially samples of dimension dim are produced, with values in each
    dimension being floats, but they are binned into discrete categories, with
    num_bins bins per dimension. This binning produces an SDR.

    :param dim: dimensionality of each raw sample
    :param num_samples: total number of samples (half positive, half negative)
    :param num_bins: bins per dimension used to discretize into an SDR
    :param sparse: if True, wrap the results in SM32 sparse matrices
    :return: (positives, negatives) tuple
    """
    positives , negatives = [ ] , [ ]
    positive , negative = generate_matrices ( dim )
    for i in range ( num_samples ) :
        phase_1 = generate_phase_1 ( dim )
        phase_2 = generate_phase_2 ( phase_1 , dim )
        if i < num_samples / 2 :
            positives . append ( numpy . dot ( positive , phase_2 ) )
        else :
            negatives . append ( numpy . dot ( negative , phase_2 ) )
    binned_data = bin_data ( positives + negatives , dim , num_bins )
    # Bug fix: len(...) / 2 is a float in Python 3 and raises TypeError when
    # used as a slice index; use floor division (matches Python 2 behavior).
    midpoint = len ( binned_data ) // 2
    positives = binned_data [ : midpoint ]
    negatives = binned_data [ midpoint : ]
    if sparse :
        positives = SM32 ( positives )
        negatives = SM32 ( negatives )
    return positives , negatives
|
def compatible ( self , a , b ) :
    """Return `True` if type *a* is compatible with type *b*.

    Two types are compatible when their families (the type itself plus all
    of its descendants) share at least one member.
    """
    family_a = set ( self . descendants ( a ) )
    family_a . add ( a )
    family_b = set ( self . descendants ( b ) )
    family_b . add ( b )
    return bool ( family_a & family_b )
|
def _set_where ( self ) :
"""Set the where clause for the relation query .
: return : self
: rtype : BelongsToMany"""
|
foreign = self . get_foreign_key ( )
self . _query . where ( foreign , "=" , self . _parent . get_key ( ) )
return self
|
def as_dict ( self ) :
    """Serialize this attribute filter into a plain python dictionary.

    Boolean leaves are copied as-is; nested filters are serialized
    recursively; any other value types are silently omitted.
    """
    result = { }
    for key , value in self . _key_map . items ( ) :
        if isinstance ( value , bool ) :
            result [ key ] = value
        elif isinstance ( value , self . __class__ ) :
            result [ key ] = value . as_dict ( )
    return result
|
def deepvalidation ( self ) :
    """Perform deep validation of this element against its set definition.

    Only runs when the owning document has deep validation enabled and the
    parent's set is declared (and not an internal '_'-prefixed set).

    Raises:
        :class:`DeepValidationError`
    """
    if self . doc and self . doc . deepvalidation and self . parent . set and self . parent . set [ 0 ] != '_' :
        try :
            self . doc . setdefinitions [ self . parent . set ] . testsubclass ( self . parent . cls , self . subset , self . cls )
        except KeyError as e :
            # The set definition was never loaded; that is only an error when
            # a class is declared and ad-hoc sets are disallowed.
            if self . parent . cls and not self . doc . allowadhocsets :
                raise DeepValidationError ( "Set definition " + self . parent . set + " for " + self . parent . XMLTAG + " not loaded (feature validation failed)!" )
        except DeepValidationError as e :
            # Re-raise with extra context (set name, tag, element ID).
            errormsg = str ( e ) + " (in set " + self . parent . set + " for " + self . parent . XMLTAG
            if self . parent . id :
                errormsg += " with ID " + self . parent . id
            errormsg += ")"
            raise DeepValidationError ( errormsg )
|
def get_categories ( blog_id , username , password ) :
    """metaWeblog.getCategories(blog_id, username, password)
    => category structure[]"""
    authenticate ( username , password )
    site = Site . objects . get_current ( )
    structures = [ ]
    for category in Category . objects . all ( ) :
        structures . append ( category_structure ( category , site ) )
    return structures
|
def i2c_config ( self , read_delay_time = 0 ) :
    """Configure the Arduino i2c bus, optionally with a read delay.

    :param read_delay_time: firmata i2c delay time
    :returns: No return value
    """
    config_task = asyncio . ensure_future ( self . core . i2c_config ( read_delay_time ) )
    self . loop . run_until_complete ( config_task )
|
def nice ( self , work_spec_name , nice ) :
    '''Change the priority of an existing work spec.

    :param str work_spec_name: name of the work spec to re-prioritize
    :param int nice: new niceness value for that spec
    '''
    with self . registry . lock ( identifier = self . worker_id ) as session :
        # Bug fix: dict(work_spec_name=nice) stored the literal string key
        # 'work_spec_name' for every call instead of keying the update on
        # the actual spec name passed in.
        session . update ( NICE_LEVELS , { work_spec_name : nice } )
|
def _families_and_addresses ( self , hostname , port ) :
"""Yield pairs of address families and addresses to try for connecting .
: param str hostname : the server to connect to
: param int port : the server port to connect to
: returns : Yields an iterable of ` ` ( family , address ) ` ` tuples"""
|
guess = True
addrinfos = socket . getaddrinfo ( hostname , port , socket . AF_UNSPEC , socket . SOCK_STREAM )
for ( family , socktype , proto , canonname , sockaddr ) in addrinfos :
if socktype == socket . SOCK_STREAM :
yield family , sockaddr
guess = False
# some OS like AIX don ' t indicate SOCK _ STREAM support , so just
# guess . : ( We only do this if we did not get a single result marked
# as socktype = = SOCK _ STREAM .
if guess :
for family , _ , _ , _ , sockaddr in addrinfos :
yield family , sockaddr
|
def _write_directory_records ( self , vd , outfp , progress ) : # type: (headervd.PrimaryOrSupplementaryVD, BinaryIO, PyCdlib._Progress) -> None
    '''An internal method to write out the directory records from a particular
    Volume Descriptor, walking the directory tree breadth-first and emitting
    both endiannesses of each Path Table Record along the way.

    Parameters:
     vd - The Volume Descriptor to write the Directory Records from.
     outfp - The file object to write data to.
     progress - The _Progress object to use for outputting progress.
    Returns:
     Nothing.
    '''
    log_block_size = vd . logical_block_size ( )
    # Running byte offsets into the little/big-endian path tables.
    le_ptr_offset = 0
    be_ptr_offset = 0
    # Breadth-first traversal starting at the root directory record.
    dirs = collections . deque ( [ vd . root_directory_record ( ) ] )
    while dirs :
        curr = dirs . popleft ( )
        curr_dirrecord_offset = 0
        if curr . is_dir ( ) :
            if curr . ptr is None :
                raise pycdlibexception . PyCdlibInternalError ( 'Directory has no Path Table Record' )
            # Little Endian PTR
            outfp . seek ( vd . path_table_location_le * log_block_size + le_ptr_offset )
            ret = curr . ptr . record_little_endian ( )
            self . _outfp_write_with_check ( outfp , ret )
            le_ptr_offset += len ( ret )
            # Big Endian PTR
            outfp . seek ( vd . path_table_location_be * log_block_size + be_ptr_offset )
            ret = curr . ptr . record_big_endian ( )
            self . _outfp_write_with_check ( outfp , ret )
            be_ptr_offset += len ( ret )
            progress . call ( curr . get_data_length ( ) )
        dir_extent = curr . extent_location ( )
        for child in curr . children : # No matter what type the child is, we need to first write
            # out the directory record entry.
            recstr = child . record ( )
            # Directory records may not straddle a logical block boundary;
            # spill to the next extent when this one would overflow.
            if ( curr_dirrecord_offset + len ( recstr ) ) > log_block_size :
                dir_extent += 1
                curr_dirrecord_offset = 0
            outfp . seek ( dir_extent * log_block_size + curr_dirrecord_offset )
            # Now write out the child
            self . _outfp_write_with_check ( outfp , recstr )
            curr_dirrecord_offset += len ( recstr )
            if child . rock_ridge is not None :
                if child . rock_ridge . dr_entries . ce_record is not None : # The child has a continue block, so write it out here.
                    ce_rec = child . rock_ridge . dr_entries . ce_record
                    outfp . seek ( ce_rec . bl_cont_area * self . pvd . logical_block_size ( ) + ce_rec . offset_cont_area )
                    rec = child . rock_ridge . record_ce_entries ( )
                    self . _outfp_write_with_check ( outfp , rec )
                    progress . call ( len ( rec ) )
                # Child-link records point elsewhere in the tree; do not
                # descend through them (avoids infinite recursion).
                if child . rock_ridge . child_link_record_exists ( ) :
                    continue
            if child . is_dir ( ) : # If the child is a directory, and is not dot or dotdot,
                # we want to descend into it to look at the children.
                if not child . is_dot ( ) and not child . is_dotdot ( ) :
                    dirs . append ( child )
|
def _remove_unlistened_nets ( block ) :
    """Removes all nets that are not connected to an output wirevector.

    A net is kept ("live") if it is a memory write ('@'), drives an Output
    wirevector, or transitively feeds a wire read by another live net.
    """
    live_nets = set ( )
    live_wires = set ( )

    def mark_live ( net ) :
        live_nets . add ( net )
        live_wires . update ( net . args )

    # Seed the live set with nets that have externally visible effects.
    for net in block . logic :
        if net . op == '@' or any ( isinstance ( dest , Output ) for dest in net . dests ) :
            mark_live ( net )

    # Fixed point: keep sweeping until no new net becomes live.
    changed = True
    while changed :
        changed = False
        for net in block . logic - live_nets :
            if any ( dest_wire in live_wires for dest_wire in net . dests ) :
                mark_live ( net )
                changed = True
    block . logic = live_nets
    _remove_unused_wires ( block )
|
def connect_command ( self ) :
    '''Generates a JSON string with the params to be used
    when sending CONNECT to the server.

      ->> CONNECT {"verbose": false, "pedantic": false, "lang": "python2" }

    Includes credentials (user/pass or auth token) only when the server's
    INFO advertised auth_required.
    '''
    options = { "verbose" : self . options [ "verbose" ] , "pedantic" : self . options [ "pedantic" ] , "lang" : __lang__ , "version" : __version__ , "protocol" : PROTOCOL }
    # Idiom fix: '== True' comparison replaced with a truthiness test on
    # the (JSON boolean) auth_required flag; .get() also covers the
    # missing-key case.
    if self . _server_info . get ( "auth_required" ) :
        # In case there is no password, then consider handling
        # sending a token instead.
        if self . options [ "user" ] is not None and self . options [ "password" ] is not None :
            options [ "user" ] = self . options [ "user" ]
            options [ "pass" ] = self . options [ "password" ]
        elif self . options [ "token" ] is not None :
            options [ "auth_token" ] = self . options [ "token" ]
        elif self . _current_server . uri . password is None :
            # A lone username in the URI is treated as an auth token.
            options [ "auth_token" ] = self . _current_server . uri . username
        else :
            options [ "user" ] = self . _current_server . uri . username
            options [ "pass" ] = self . _current_server . uri . password
    if self . options [ "name" ] is not None :
        options [ "name" ] = self . options [ "name" ]
    if self . options [ "no_echo" ] is not None :
        options [ "echo" ] = not self . options [ "no_echo" ]
    args = json . dumps ( options , sort_keys = True )
    return CONNECT_PROTO . format ( CONNECT_OP , args , _CRLF_ )
|
def _operator ( self , op , close_group = False ) :
    """Append a boolean operator to the query string.

    A term must already have been added before calling this. Every
    operator has a helper, so direct invocation is rarely needed.

    Arguments:
        op (str): The operator to add. Must be in the OP_LIST.
        close_group (bool): If ``True``, ends the current parenthetical
                group and starts a new one ("(foo) AND (bar)" is two
                groups; "(foo AND bar)" is one).

    Returns:
        SearchHelper: Self
    """
    op = op . upper ( ) . strip ( )
    if op not in OP_LIST :
        raise ValueError ( "Error: '{}' is not a valid operator." . format ( op ) )
    separator = ") {} (" . format ( op ) if close_group else " {} " . format ( op )
    self . __query [ "q" ] += separator
    return self
|
def is_intent_name ( name ) : # type: (str) -> Callable[[HandlerInput], bool]
    """Build a predicate matching intent requests with the given name.

    The returned function can be applied to a
    :py:class:`ask_sdk_core.handler_input.HandlerInput` and returns True
    only when the input wraps an
    :py:class:`ask_sdk_model.intent_request.IntentRequest` whose intent
    name equals ``name``.

    :param name: Name to be matched with the Intent Request Name
    :type name: str
    :return: Predicate function that can be used to check name of the request
    :rtype: Callable[[HandlerInput], bool]
    """
    def can_handle_wrapper ( handler_input ) : # type: (HandlerInput) -> bool
        request = handler_input . request_envelope . request
        if not isinstance ( request , IntentRequest ) :
            return False
        return request . intent . name == name
    return can_handle_wrapper
|
def OnMoreSquareToggle ( self , event ) :
    """Toggle the more-square view (better looking, but more likely to
    filter records), refresh the map, and sync the menu check mark."""
    new_style = not self . squareMap . square_style
    self . squareMap . square_style = new_style
    self . squareMap . Refresh ( )
    self . moreSquareViewItem . Check ( new_style )
|
def _load_calib ( self ) :
    """Load and compute intrinsic and extrinsic calibration parameters.

    Reads ``calib.txt`` from the sequence directory and populates
    ``self.calib`` with projection matrices, velodyne-to-camera
    transforms, camera intrinsics, and stereo baselines.
    """
    # We'll build the calibration parameters as a dictionary, then
    # convert it to a namedtuple to prevent it from being modified later
    data = { }
    # Load the calibration file
    # NOTE(review): assumes the KITTI-style calib.txt with P0..P3 and Tr
    # entries — confirm against the dataset layout.
    calib_filepath = os . path . join ( self . sequence_path , 'calib.txt' )
    filedata = utils . read_calib_file ( calib_filepath )
    # Create 3x4 projection matrices
    P_rect_00 = np . reshape ( filedata [ 'P0' ] , ( 3 , 4 ) )
    P_rect_10 = np . reshape ( filedata [ 'P1' ] , ( 3 , 4 ) )
    P_rect_20 = np . reshape ( filedata [ 'P2' ] , ( 3 , 4 ) )
    P_rect_30 = np . reshape ( filedata [ 'P3' ] , ( 3 , 4 ) )
    data [ 'P_rect_00' ] = P_rect_00
    data [ 'P_rect_10' ] = P_rect_10
    data [ 'P_rect_20' ] = P_rect_20
    data [ 'P_rect_30' ] = P_rect_30
    # Compute the rectified extrinsics from cam0 to camN
    # (pure x-translation derived from the projection matrices)
    T1 = np . eye ( 4 )
    T1 [ 0 , 3 ] = P_rect_10 [ 0 , 3 ] / P_rect_10 [ 0 , 0 ]
    T2 = np . eye ( 4 )
    T2 [ 0 , 3 ] = P_rect_20 [ 0 , 3 ] / P_rect_20 [ 0 , 0 ]
    T3 = np . eye ( 4 )
    T3 [ 0 , 3 ] = P_rect_30 [ 0 , 3 ] / P_rect_30 [ 0 , 0 ]
    # Compute the velodyne to rectified camera coordinate transforms
    data [ 'T_cam0_velo' ] = np . reshape ( filedata [ 'Tr' ] , ( 3 , 4 ) )
    data [ 'T_cam0_velo' ] = np . vstack ( [ data [ 'T_cam0_velo' ] , [ 0 , 0 , 0 , 1 ] ] )
    data [ 'T_cam1_velo' ] = T1 . dot ( data [ 'T_cam0_velo' ] )
    data [ 'T_cam2_velo' ] = T2 . dot ( data [ 'T_cam0_velo' ] )
    data [ 'T_cam3_velo' ] = T3 . dot ( data [ 'T_cam0_velo' ] )
    # Compute the camera intrinsics (upper-left 3x3 of each projection)
    data [ 'K_cam0' ] = P_rect_00 [ 0 : 3 , 0 : 3 ]
    data [ 'K_cam1' ] = P_rect_10 [ 0 : 3 , 0 : 3 ]
    data [ 'K_cam2' ] = P_rect_20 [ 0 : 3 , 0 : 3 ]
    data [ 'K_cam3' ] = P_rect_30 [ 0 : 3 , 0 : 3 ]
    # Compute the stereo baselines in meters by projecting the origin of
    # each camera frame into the velodyne frame and computing the distances
    # between them
    p_cam = np . array ( [ 0 , 0 , 0 , 1 ] )
    p_velo0 = np . linalg . inv ( data [ 'T_cam0_velo' ] ) . dot ( p_cam )
    p_velo1 = np . linalg . inv ( data [ 'T_cam1_velo' ] ) . dot ( p_cam )
    p_velo2 = np . linalg . inv ( data [ 'T_cam2_velo' ] ) . dot ( p_cam )
    p_velo3 = np . linalg . inv ( data [ 'T_cam3_velo' ] ) . dot ( p_cam )
    data [ 'b_gray' ] = np . linalg . norm ( p_velo1 - p_velo0 )
    # gray baseline
    data [ 'b_rgb' ] = np . linalg . norm ( p_velo3 - p_velo2 )
    # rgb baseline
    self . calib = namedtuple ( 'CalibData' , data . keys ( ) ) ( * data . values ( ) )
|
def find_repositories_by_walking_without_following_symlinks ( path ) :
    """Walk a tree (without following symlinks) and return a sequence of
    (directory, dotdir) pairs for every VCS dot-directory found."""
    return [
        ( dirpath , dotdir )
        for dirpath , dirnames , _filenames in os . walk ( path , followlinks = False )
        for dotdir in set ( dirnames ) & DOTDIRS
    ]
|
def PintPars(datablock, araiblock, zijdblock, start, end, accept, **kwargs):
    """Calculate the paleointensity MagIC parameters for one specimen.

    Parameters
    ----------
    datablock : list of dicts, or pandas DataFrame when ``version=3``
        the specimen's measurement records
    araiblock : list
        sorted Arai-plot data:
        [first_Z, first_I, ptrm_check, ptrm_tail, zptrm_check, GammaChecks]
        optionally followed by ThetaChecks and DeltaChecks
        (perpendicular method of paleointensity only)
    zijdblock : list
        zero-field demagnetization data, rows of
        [step, dec, inc, intensity, ZI-flag, ...]
    start, end : int
        indices of the first/last step of the chosen Arai-plot segment
    accept : dict
        acceptance criteria; only the beta threshold is used here
        (to build the SCAT box)
    **kwargs
        ``version=3`` selects MagIC 3.0 column names

    Returns
    -------
    (pars, errcode) : (dict, int)
        dictionary of paleointensity statistics and 0 on success; if the
        best-fit slope cannot be computed, the partial result and 1.
    """
    if 'version' in list(kwargs.keys()) and kwargs['version'] == 3:
        # MagIC 3.0 column names
        meth_key = 'method_codes'
        beta_key = 'int_b_beta'
        temp_key, min_key, max_key = 'treat_temp', 'meas_step_min', 'meas_step_max'
        dc_theta_key, dc_phi_key = 'treat_dc_field_theta', 'treat_dc_field_phi'
        # convert dataframe to list of dictionaries
        datablock = datablock.to_dict('records')
        z_key = 'int_z'
        drats_key = 'int_drats'
        drat_key = 'int_drat'
        md_key = 'int_md'
        dec_key = 'dir_dec'
        inc_key = 'dir_inc'
        mad_key = 'int_mad_free'
        dang_key = 'int_dang'
        ptrm_key = 'int_n_ptrm'
        theta_key = 'int_theta'
        gamma_key = 'int_gamma'
        delta_key = 'int_delta'
        frac_key = 'int_frac'
        gmax_key = 'int_gmax'
        scat_key = 'int_scat'
    else:
        # MagIC 2.5 column names
        beta_key = 'specimen_b_beta'
        meth_key = 'magic_method_codes'
        temp_key, min_key, max_key = 'treatment_temp', 'measurement_step_min', 'measurement_step_max'
        z_key = 'specimen_z'
        drats_key = 'specimen_drats'
        drat_key = 'specimen_drat'
        md_key = 'specimen_md'
        dec_key = 'specimen_dec'
        inc_key = 'specimen_inc'
        mad_key = 'specimen_int_mad'
        dang_key = 'specimen_dang'
        ptrm_key = 'specimen_int_ptrm_n'
        theta_key = 'specimen_theta'
        gamma_key = 'specimen_gamma'
        delta_key = 'specimen_delta'
        frac_key = 'specimen_frac'
        gmax_key = 'specimen_gmax'
        scat_key = 'specimen_scat'
    methcode, ThetaChecks, DeltaChecks, GammaChecks = "", "", "", ""
    # unpack the sorted Arai-plot data
    first_Z, first_I, ptrm_check, ptrm_tail, zptrm_check, GammaChecks = (
        araiblock[0], araiblock[1], araiblock[2], araiblock[3],
        araiblock[4], araiblock[5])
    if len(araiblock) > 6:
        # used only for perpendicular method of paleointensity
        ThetaChecks = araiblock[6]
        # used only for perpendicular method of paleointensity
        DeltaChecks = araiblock[7]
    xi, yi, diffcum = [], [], 0
    xiz, xzi, yiz, yzi = [], [], [], []
    Nptrm, dmax = 0, -1e-22
    # check if even zero and infield steps
    if len(first_Z) > len(first_I):
        maxe = len(first_I) - 1
    else:
        maxe = len(first_Z) - 1
    if end == 0 or end > maxe:
        end = maxe
    # get the MAD, DANG, etc. for directional data
    bstep = araiblock[0][start][0]
    estep = araiblock[0][end][0]
    zstart, zend = 0, len(zijdblock)
    for k in range(len(zijdblock)):
        zrec = zijdblock[k]
        if zrec[0] == bstep:
            zstart = k
        if zrec[0] == estep:
            zend = k
    PCA = domean(zijdblock, zstart, zend, 'DE-BFL')
    D, Diz, Dzi, Du = [], [], [], []
    # list of NRM vectors, and separated by zi and iz
    for rec in zijdblock:
        D.append((rec[1], rec[2], rec[3]))
        Du.append((rec[1], rec[2]))
        if rec[4] == 1:
            Dzi.append((rec[1], rec[2]))  # if this is ZI step
        else:
            Diz.append((rec[1], rec[2]))  # if this is IZ step
    # calculate the vector difference sum
    vds = dovds(D)
    b_zi, b_iz = [], []
    # collect the zero-field / in-field pairs over the chosen segment
    for k in range(start, end + 1):
        for l in range(len(first_I)):
            irec = first_I[l]
            if irec[0] == first_Z[k][0]:
                xi.append(irec[3])
                yi.append(first_Z[k][3])
    pars, errcode = int_pars(xi, yi, vds)
    if errcode == 1:
        return pars, errcode
    # zigzag check on the Arai-plot slopes of successive ZI/IZ pairs
    for k in range(len(first_Z) - 1):
        for l in range(k):
            # only go down to 10% of NRM.....
            if old_div(first_Z[k][3], vds) > 0.1:
                irec = first_I[l]
                if irec[4] == 1 and first_I[l + 1][4] == 0:  # a ZI step
                    xzi = irec[3]
                    yzi = first_Z[k][3]
                    xiz = first_I[l + 1][3]
                    yiz = first_Z[k + 1][3]
                    slope = np.arctan2((yzi - yiz), (xiz - xzi))
                    r = np.sqrt((yzi - yiz) ** 2 + (xiz - xzi) ** 2)
                    if r > .1 * vds:
                        b_zi.append(slope)  # suppress noise
                elif irec[4] == 0 and first_I[l + 1][4] == 1:  # an IZ step
                    xiz = irec[3]
                    yiz = first_Z[k][3]
                    xzi = first_I[l + 1][3]
                    yzi = first_Z[k + 1][3]
                    slope = np.arctan2((yiz - yzi), (xzi - xiz))
                    r = np.sqrt((yiz - yzi) ** 2 + (xzi - xiz) ** 2)
                    if r > .1 * vds:
                        b_iz.append(slope)  # suppress noise
    ZigZag, Frat, Trat = -1, 0, 0
    if len(Diz) > 2 and len(Dzi) > 2:
        ZigZag = 0
        dizp = fisher_mean(Diz)  # get Fisher stats on IZ steps
        dzip = fisher_mean(Dzi)  # get Fisher stats on ZI steps
        dup = fisher_mean(Du)    # get Fisher stats on all steps
        # if directions are TOO well grouped, can get false positive for
        # ftest, so angles must be > 3 degrees apart.
        if angle([dizp['dec'], dizp['inc']], [dzip['dec'], dzip['inc']]) > 3.:
            # Watson test for common mean
            F = (dup['n'] - 2.) * (dzip['r'] + dizp['r'] - dup['r']) / \
                (dup['n'] - dzip['r'] - dizp['r'])
            # number of degrees of freedom
            nf = 2. * (dup['n'] - 2.)
            ftest = fcalc(2, nf)
            Frat = old_div(F, ftest)
            if Frat > 1.:
                ZigZag = Frat  # fails zigzag on directions
                methcode = "SM-FTEST"
    # now do slopes
    if len(b_zi) > 2 and len(b_iz) > 2:
        bzi_m, bzi_sig = gausspars(b_zi)  # mean, std dev
        biz_m, biz_sig = gausspars(b_iz)
        n_zi = float(len(b_zi))
        n_iz = float(len(b_iz))
        b_diff = abs(bzi_m - biz_m)  # difference in means
        # avoid false positives - set 3 degree slope difference here too
        if b_diff > 3 * np.pi / 180.:
            nf = n_zi + n_iz - 2.  # degrees of freedom
            svar = old_div(((n_zi - 1.) * bzi_sig ** 2 +
                            (n_iz - 1.) * biz_sig ** 2), nf)
            # student's t
            T = old_div((b_diff), np.sqrt(
                svar * (old_div(1.0, n_zi) + old_div(1.0, n_iz))))
            ttest = tcalc(nf, .05)  # t-test at 95% conf.
            Trat = old_div(T, ttest)
            if Trat > 1 and Trat > Frat:
                ZigZag = Trat  # fails zigzag on directions
                methcode = "SM-TTEST"
    pars[z_key] = ZigZag
    pars[meth_key] = methcode
    # do drats (alteration checks relative to the last in-field step used)
    if len(ptrm_check) != 0:
        diffcum, drat_max = 0, 0
        for prec in ptrm_check:
            step = prec[0]
            zend = end
            while zend > len(zijdblock) - 1:
                zend = zend - 2
            # don't count alteration that happens after this step
            if step < zijdblock[zend][0]:
                Nptrm += 1
                for irec in first_I:
                    if irec[0] == step:
                        break
                diffcum += prec[3] - irec[3]
                if abs(prec[3] - irec[3]) > drat_max:
                    drat_max = abs(prec[3] - irec[3])
        pars[drats_key] = (100 * abs(diffcum) / first_I[zend][3])
        pars[drat_key] = (100 * abs(drat_max) / first_I[zend][3])
    elif len(zptrm_check) != 0:
        diffcum = 0
        for prec in zptrm_check:
            step = prec[0]
            zend = end
            while zend > len(zijdblock) - 1:
                zend = zend - 1
            if step < zijdblock[zend][0]:
                Nptrm += 1
                for irec in first_I:
                    if irec[0] == step:
                        break
                diffcum += prec[3] - irec[3]
        pars[drats_key] = (100 * abs(diffcum) / first_I[zend][3])
    else:
        pars[drats_key] = -1
        pars[drat_key] = -1
    # and the pTRM tails
    if len(ptrm_tail) != 0:
        for trec in ptrm_tail:
            step = trec[0]
            for irec in first_I:
                if irec[0] == step:
                    break
            if abs(trec[3]) > dmax:
                dmax = abs(trec[3])
        pars[md_key] = (100 * dmax / vds)
    else:
        pars[md_key] = -1
    pars[min_key] = bstep
    pars[max_key] = estep
    pars[dec_key] = PCA["specimen_dec"]
    pars[inc_key] = PCA["specimen_inc"]
    pars[mad_key] = PCA["specimen_mad"]
    pars[dang_key] = PCA["specimen_dang"]
    pars[ptrm_key] = Nptrm
    # and the ThetaChecks
    if ThetaChecks != "":
        t = 0
        for theta in ThetaChecks:
            if theta[0] >= bstep and theta[0] <= estep and theta[1] > t:
                t = theta[1]
        pars[theta_key] = t
    else:
        pars[theta_key] = -1
    # and the DeltaChecks
    if DeltaChecks != "":
        d = 0
        for delta in DeltaChecks:
            if delta[0] >= bstep and delta[0] <= estep and delta[1] > d:
                d = delta[1]
        # BUG FIX: the original had a bare `pars[delta_key]` expression here,
        # which discarded the computed maximum instead of storing it.
        pars[delta_key] = d
    else:
        pars[delta_key] = -1
    pars[gamma_key] = -1
    if GammaChecks != "":
        for gamma in GammaChecks:
            if gamma[0] <= estep:
                # BUG FIX: was hard-coded 'specimen_gamma', which wrote to
                # the wrong column when version=3 (gamma_key == 'int_gamma').
                pars[gamma_key] = gamma[1]
    # From here added By Ron Shaar 11-Dec 2012
    # New parameters defined in Shaar and Tauxe (2012):
    # FRAC (specimen_frac) - ranges from 0. to 1.
    # SCAT (specimen_scat) - takes 1/0
    # gap_max (specimen_gmax) - ranges from 0. to 1.
    # FRAC is similar to Fvds, but the numerator is the vds fraction:
    # FRAC = [vds (start,end)] / total vds]
    # gap_max = max[(vector difference) / vds (start,end)]
    # collect all zijderveld data to arrays and calculate VDS
    z_temperatures = [row[0] for row in zijdblock]
    zdata = []          # zero-field measurements in Cartesian coordinates
    vector_diffs = []   # vector differences (for vds calculation)
    NRM = zijdblock[0][3]  # NRM
    for k in range(len(zijdblock)):
        DIR = [zijdblock[k][1], zijdblock[k][2], old_div(zijdblock[k][3], NRM)]
        cart = dir2cart(DIR)
        zdata.append(np.array([cart[0], cart[1], cart[2]]))
        if k > 0:
            vector_diffs.append(
                np.sqrt(sum((np.array(zdata[-2]) - np.array(zdata[-1])) ** 2)))
    # last vector difference: from the last point to the origin.
    vector_diffs.append(np.sqrt(sum(np.array(zdata[-1]) ** 2)))
    vds = sum(vector_diffs)  # vds calculation
    zdata = np.array(zdata)
    vector_diffs = np.array(vector_diffs)
    # calculate the vds within the chosen segment
    vector_diffs_segment = vector_diffs[zstart:zend]
    # FRAC calculation
    FRAC = old_div(sum(vector_diffs_segment), vds)
    pars[frac_key] = FRAC
    # gap_max calculation
    max_FRAC_gap = max(
        old_div(vector_diffs_segment, sum(vector_diffs_segment)))
    pars[gmax_key] = max_FRAC_gap
    # ---------------------------------------------------------------
    # Calculate the "scat box":
    # all data-points, pTRM checks, and tail-checks should be inside it
    # ---------------------------------------------------------------
    # initialization
    pars["fail_arai_beta_box_scatter"] = False  # fail scat: Arai data points
    pars["fail_ptrm_beta_box_scatter"] = False  # fail scat: pTRM checks
    pars["fail_tail_beta_box_scatter"] = False  # fail scat: tail checks
    pars[scat_key] = "t"  # Pass by default
    # collect all Arai plot data points in arrays
    x_Arai, y_Arai, t_Arai, steps_Arai = [], [], [], []
    NRMs = araiblock[0]
    PTRMs = araiblock[1]
    ptrm_checks = araiblock[2]
    ptrm_tail = araiblock[3]
    PTRMs_temperatures = [row[0] for row in PTRMs]
    NRMs_temperatures = [row[0] for row in NRMs]
    NRM = NRMs[0][3]
    for k in range(len(NRMs)):
        index_pTRMs = PTRMs_temperatures.index(NRMs[k][0])
        x_Arai.append(old_div(PTRMs[index_pTRMs][3], NRM))
        y_Arai.append(old_div(NRMs[k][3], NRM))
        t_Arai.append(NRMs[k][0])
        if NRMs[k][4] == 1:
            steps_Arai.append('ZI')
        else:
            steps_Arai.append('IZ')
    x_Arai = np.array(x_Arai)
    y_Arai = np.array(y_Arai)
    # collect all pTRM check to arrays
    x_ptrm_check, y_ptrm_check, ptrm_checks_temperatures, = [], [], []
    x_ptrm_check_starting_point, y_ptrm_check_starting_point, ptrm_checks_starting_temperatures = [], [], []
    for k in range(len(ptrm_checks)):
        if ptrm_checks[k][0] in NRMs_temperatures:
            # find the starting point of the pTRM check:
            for i in range(len(datablock)):
                rec = datablock[i]
                if "LT-PTRM-I" in rec[meth_key] and float(rec[temp_key]) == ptrm_checks[k][0]:
                    starting_temperature = (float(datablock[i - 1][temp_key]))
                    try:
                        index = t_Arai.index(starting_temperature)
                        x_ptrm_check_starting_point.append(x_Arai[index])
                        y_ptrm_check_starting_point.append(y_Arai[index])
                        ptrm_checks_starting_temperatures.append(
                            starting_temperature)
                        # NOTE(review): zerofield_temperatures / zerofields
                        # are not defined in this function; if undefined at
                        # module scope the NameError is silently swallowed by
                        # the bare except below -- confirm upstream.
                        index_zerofield = zerofield_temperatures.index(
                            ptrm_checks[k][0])
                        x_ptrm_check.append(old_div(ptrm_checks[k][3], NRM))
                        y_ptrm_check.append(
                            old_div(zerofields[index_zerofield][3], NRM))
                        ptrm_checks_temperatures.append(ptrm_checks[k][0])
                        break
                    except:
                        pass
    x_ptrm_check_starting_point = np.array(x_ptrm_check_starting_point)
    y_ptrm_check_starting_point = np.array(y_ptrm_check_starting_point)
    ptrm_checks_starting_temperatures = np.array(
        ptrm_checks_starting_temperatures)
    x_ptrm_check = np.array(x_ptrm_check)
    y_ptrm_check = np.array(y_ptrm_check)
    ptrm_checks_temperatures = np.array(ptrm_checks_temperatures)
    # collect tail checks to arrays
    x_tail_check, y_tail_check, tail_check_temperatures = [], [], []
    x_tail_check_starting_point, y_tail_check_starting_point, tail_checks_starting_temperatures = [], [], []
    for k in range(len(ptrm_tail)):
        if ptrm_tail[k][0] in NRMs_temperatures:
            # find the starting point of the tail check:
            for i in range(len(datablock)):
                rec = datablock[i]
                if "LT-PTRM-MD" in rec[meth_key] and float(rec[temp_key]) == ptrm_tail[k][0]:
                    starting_temperature = (float(datablock[i - 1][temp_key]))
                    try:
                        index = t_Arai.index(starting_temperature)
                        x_tail_check_starting_point.append(x_Arai[index])
                        y_tail_check_starting_point.append(y_Arai[index])
                        tail_checks_starting_temperatures.append(
                            starting_temperature)
                        # NOTE(review): infield_temperatures / infields are
                        # likewise undefined here; errors fall into the bare
                        # except below -- confirm upstream.
                        index_infield = infield_temperatures.index(
                            ptrm_tail[k][0])
                        x_tail_check.append(
                            old_div(infields[index_infield][3], NRM))
                        y_tail_check.append(
                            old_div(ptrm_tail[k][3], NRM) +
                            old_div(zerofields[index_infield][3], NRM))
                        tail_check_temperatures.append(ptrm_tail[k][0])
                        break
                    except:
                        pass
    x_tail_check = np.array(x_tail_check)
    y_tail_check = np.array(y_tail_check)
    tail_check_temperatures = np.array(tail_check_temperatures)
    x_tail_check_starting_point = np.array(x_tail_check_starting_point)
    y_tail_check_starting_point = np.array(y_tail_check_starting_point)
    tail_checks_starting_temperatures = np.array(
        tail_checks_starting_temperatures)
    # collect the chosen segment in the Arai plot to arrays
    x_Arai_segment = x_Arai[start:end + 1]  # chosen segment in the Arai plot
    y_Arai_segment = y_Arai[start:end + 1]  # chosen segment in the Arai plot
    # collect pTRM checks in segment to arrays
    # notice, this is different than the conventional DRATS.
    # for scat calculation we take only the pTRM checks which were carried out
    # before reaching the highest temperature in the chosen segment
    x_ptrm_check_for_SCAT, y_ptrm_check_for_SCAT = [], []
    for k in range(len(ptrm_checks_temperatures)):
        # BUG FIX: the original compared the whole
        # ptrm_checks_starting_temperatures array (missing the [k] index),
        # unlike the tail-check loop below.
        if ptrm_checks_temperatures[k] >= pars[min_key] and ptrm_checks_starting_temperatures[k] <= pars[max_key]:
            x_ptrm_check_for_SCAT.append(x_ptrm_check[k])
            y_ptrm_check_for_SCAT.append(y_ptrm_check[k])
    x_ptrm_check_for_SCAT = np.array(x_ptrm_check_for_SCAT)
    y_ptrm_check_for_SCAT = np.array(y_ptrm_check_for_SCAT)
    # collect Tail checks in segment to arrays
    # for scat calculation we take only the tail checks which were carried out
    # before reaching the highest temperature in the chosen segment
    x_tail_check_for_SCAT, y_tail_check_for_SCAT = [], []
    for k in range(len(tail_check_temperatures)):
        if tail_check_temperatures[k] >= pars[min_key] and tail_checks_starting_temperatures[k] <= pars[max_key]:
            x_tail_check_for_SCAT.append(x_tail_check[k])
            y_tail_check_for_SCAT.append(y_tail_check[k])
    x_tail_check_for_SCAT = np.array(x_tail_check_for_SCAT)
    y_tail_check_for_SCAT = np.array(y_tail_check_for_SCAT)
    # calculate the lines that define the scat box:
    # if threshold value for beta is not defined, then scat cannot be
    # calculated (pass); in this case, scat pass
    if beta_key in list(accept.keys()) and accept[beta_key] != "":
        b_beta_threshold = float(accept[beta_key])
        # best fit line
        # NOTE(review): b_key is not defined in this function; assumed to be
        # defined at module scope -- confirm.
        b = pars[b_key]
        cm_x = np.mean(np.array(x_Arai_segment))  # x center of mass
        cm_y = np.mean(np.array(y_Arai_segment))  # y center of mass
        a = cm_y - b * cm_x
        # lines with slope = slope +/- 2*(specimen_b_beta)
        two_sigma_beta_threshold = 2 * b_beta_threshold
        two_sigma_slope_threshold = abs(two_sigma_beta_threshold * b)
        # a line with a shallower slope (b + 2*beta*b) passing through the
        # center of mass: y = a1 + b1x
        b1 = b + two_sigma_slope_threshold
        a1 = cm_y - b1 * cm_x
        # bounding line with steeper slope (b - 2*beta*b) passing through the
        # center of mass: y = a2 + b2x
        b2 = b - two_sigma_slope_threshold
        a2 = cm_y - b2 * cm_x
        # lower bounding line of the 'beta box': y = intercept1 + slop1x
        slop1 = old_div(a1, ((old_div(a2, b2))))
        intercept1 = a1
        # higher bounding line of the 'beta box': y = intercept2 + slop2x
        slop2 = old_div(a2, ((old_div(a1, b1))))
        intercept2 = a2
        pars['specimen_scat_bounding_line_high'] = [intercept2, slop2]
        pars['specimen_scat_bounding_line_low'] = [intercept1, slop1]
        # check if the Arai data points are in the 'box'
        # the two bounding lines
        ymin = intercept1 + x_Arai_segment * slop1
        ymax = intercept2 + x_Arai_segment * slop2
        # arrays of "True" or "False"
        check_1 = y_Arai_segment > ymax
        check_2 = y_Arai_segment < ymin
        # check if at least one "True"
        if (sum(check_1) + sum(check_2)) > 0:
            pars["fail_arai_beta_box_scatter"] = True
        # check if the pTRM checks data points are in the 'box'
        if len(x_ptrm_check_for_SCAT) > 0:
            # the two bounding lines
            ymin = intercept1 + x_ptrm_check_for_SCAT * slop1
            ymax = intercept2 + x_ptrm_check_for_SCAT * slop2
            # arrays of "True" or "False"
            check_1 = y_ptrm_check_for_SCAT > ymax
            check_2 = y_ptrm_check_for_SCAT < ymin
            # check if at least one "True"
            if (sum(check_1) + sum(check_2)) > 0:
                pars["fail_ptrm_beta_box_scatter"] = True
        # check if the tail checks data points are in the 'box'
        if len(x_tail_check_for_SCAT) > 0:
            # the two bounding lines
            ymin = intercept1 + x_tail_check_for_SCAT * slop1
            ymax = intercept2 + x_tail_check_for_SCAT * slop2
            # arrays of "True" or "False"
            check_1 = y_tail_check_for_SCAT > ymax
            check_2 = y_tail_check_for_SCAT < ymin
            # check if at least one "True"
            if (sum(check_1) + sum(check_2)) > 0:
                pars["fail_tail_beta_box_scatter"] = True
        # check if specimen_scat is PASS or FAIL:
        if pars["fail_tail_beta_box_scatter"] or pars["fail_ptrm_beta_box_scatter"] or pars["fail_arai_beta_box_scatter"]:
            pars[scat_key] = 'f'
        else:
            pars[scat_key] = 't'
    return pars, 0
|
def _escalation_rules_to_string ( escalation_rules ) :
'convert escalation _ rules dict to a string for comparison'
|
result = ''
for rule in escalation_rules :
result += 'escalation_delay_in_minutes: {0} ' . format ( rule [ 'escalation_delay_in_minutes' ] )
for target in rule [ 'targets' ] :
result += '{0}:{1} ' . format ( target [ 'type' ] , target [ 'id' ] )
return result
|
def update_extent_location(self, extent_loc):
    # type: (int) -> None
    '''
    A method to update the extent location for this Path Table Record.

    Parameters:
     extent_loc - The new extent location.
    Returns:
     Nothing.
    '''
    # Only an initialized record may be relocated; otherwise this is an
    # internal usage error.
    if self._initialized:
        self.extent_location = extent_loc
        return
    raise pycdlibexception.PyCdlibInternalError('Path Table Record not yet initialized')
|
def processor(ctx, processor_cls, process_time_limit, enable_stdout_capture=True, get_object=False):
    """Run Processor."""
    g = ctx.obj
    processor_class = load_cls(None, None, processor_cls)
    # Wire the processor into the shared queues held on the context object.
    instance = processor_class(
        projectdb=g.projectdb,
        inqueue=g.fetcher2processor,
        status_queue=g.status_queue,
        newtask_queue=g.newtask_queue,
        result_queue=g.processor2result,
        enable_stdout_capture=enable_stdout_capture,
        process_time_limit=process_time_limit,
    )
    g.instances.append(instance)
    # In testing mode (or on explicit request) hand the object back instead
    # of entering the blocking run loop.
    if g.get('testing_mode') or get_object:
        return instance
    instance.run()
|
def add_apt_source(source, key=None, update=True):
    """Adds source url to apt sources.list. Optional to pass the key url."""
    sources_path = u'/etc/apt/sources.list'
    # Keep a .bak copy of the current list before modifying it.
    sudo("cp %s{,.bak}" % sources_path)
    files.append(sources_path, source, use_sudo=True)
    if key:
        # Fetch the signing key from the url and register it with apt.
        sudo(u"wget -q %s -O - | sudo apt-key add -" % key)
    if update:
        update_apt_sources()
|
def add_iterative_predicate(self, key, values_list):
    """Add an iterative predicate: a key plus the set of values it may equal.

    The individual predicates are specified with the type ``equals`` and
    combined with a type ``or``. This works around the inability to use
    ``in`` as a predicate type for multiple taxon_key values
    (cfr. http://dev.gbif.org/issues/browse/POR-2753).

    :param key: API key to use for the query.
    :param values_list: Filename or list containing the taxon keys to be
        searched.
    """
    values = self._extract_values(values_list)
    subpredicates = []
    # Consume the value list from the end, one 'equals' predicate per value.
    while values:
        subpredicates.append({'type': 'equals', 'key': key, 'value': values.pop()})
    self.predicates.append({'type': 'or', 'predicates': subpredicates})
|
def get_git_info():
    """Get a dict with useful git info."""
    original_dir = abspath(curdir)
    try:
        # Run git from the directory containing this file so we query the
        # repository this module lives in, not the caller's cwd.
        chdir(dirname(abspath(__file__)))
        re_patt_str = (r'commit\s+(?P<commit_hash>\w+).*?Author:\s+'
                       r'(?P<author_name>.*?)\s+<(?P<author_email>.*?)>\s+Date:\s+'
                       r'(?P<date>.*?)\n\s+(?P<commit_msg>.*?)(?:\ndiff.*?)?$')
        show_out = check_output(['git', 'show']).decode('ascii')
        branch = check_output(
            ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).decode('ascii').strip()
        m = re.search(re_patt_str, show_out, re.DOTALL)
        assert m is not None, "Regex pattern:\n\n\"%s\"\n\n failed to match string:\n\n\"%s\"" % (re_patt_str, show_out)
        ret_dict = m.groupdict()
        ret_dict['branch_name'] = branch
    finally:
        # Always restore the caller's working directory.
        chdir(original_dir)
    return ret_dict
|
def construct_error_generator_middleware(error_generators):
    """Constructs a middleware which intercepts requests for any method found in
    the provided mapping of endpoints to generator functions, returning
    whatever error message the generator function returns.  Callbacks must be
    functions with the signature `fn(method, params)`.
    """
    def error_generator_middleware(make_request, web3):
        def middleware(method, params):
            # Methods without a registered generator pass straight through.
            if method not in error_generators:
                return make_request(method, params)
            error_msg = error_generators[method](method, params)
            return {'error': error_msg}
        return middleware
    return error_generator_middleware
|
def _ConvertFieldValuePair(self, js, message):
    """Convert field value pairs into regular message.

    Args:
      js: A JSON object to convert the field value pairs.
      message: A regular protocol message to record the data.

    Raises:
      ParseError: In case of problems converting.
    """
    # Names already consumed (field names and their oneof group names), used
    # to reject duplicate fields and multiple members of the same oneof.
    names = []
    message_descriptor = message.DESCRIPTOR
    # Prefer lookup by the JSON (camelCase) name; fall back to proto name.
    fields_by_json_name = dict((f.json_name, f)
                               for f in message_descriptor.fields)
    for name in js:
        try:
            field = fields_by_json_name.get(name, None)
            if not field:
                field = message_descriptor.fields_by_name.get(name, None)
            if not field:
                if self.ignore_unknown_fields:
                    continue
                raise ParseError(
                    'Message type "{0}" has no field named "{1}".'.format(
                        message_descriptor.full_name, name))
            if name in names:
                raise ParseError('Message type "{0}" should not have multiple '
                                 '"{1}" fields.'.format(
                                     message.DESCRIPTOR.full_name, name))
            names.append(name)
            # Check no other oneof field is parsed.
            if field.containing_oneof is not None:
                oneof_name = field.containing_oneof.name
                if oneof_name in names:
                    raise ParseError('Message type "{0}" should not have multiple '
                                     '"{1}" oneof fields.'.format(
                                         message.DESCRIPTOR.full_name, oneof_name))
                names.append(oneof_name)
            value = js[name]
            if value is None:
                # JSON null clears the field, except for google.protobuf.Value
                # where null is itself a legal value (NULL_VALUE enum == 0).
                if (field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE
                        and field.message_type.full_name == 'google.protobuf.Value'):
                    sub_message = getattr(message, field.name)
                    sub_message.null_value = 0
                else:
                    message.ClearField(field.name)
                continue
            # Parse field value.
            if _IsMapEntry(field):
                message.ClearField(field.name)
                self._ConvertMapFieldValue(value, message, field)
            elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
                message.ClearField(field.name)
                if not isinstance(value, list):
                    raise ParseError('repeated field {0} must be in [] which is '
                                     '{1}.'.format(name, value))
                if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
                    # Repeated message field.
                    for item in value:
                        sub_message = getattr(message, field.name).add()
                        # None is a null_value in Value.
                        if (item is None
                                and sub_message.DESCRIPTOR.full_name != 'google.protobuf.Value'):
                            raise ParseError('null is not allowed to be used as an element'
                                             ' in a repeated field.')
                        self.ConvertMessage(item, sub_message)
                else:
                    # Repeated scalar field.
                    for item in value:
                        if item is None:
                            raise ParseError('null is not allowed to be used as an element'
                                             ' in a repeated field.')
                        getattr(message, field.name).append(
                            _ConvertScalarFieldValue(item, field))
            elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
                # Singular message field: mark presence even if the sub-message
                # stays empty, then recurse.
                sub_message = getattr(message, field.name)
                sub_message.SetInParent()
                self.ConvertMessage(value, sub_message)
            else:
                setattr(message, field.name, _ConvertScalarFieldValue(value, field))
        except ParseError as e:
            # Prefix the field name once for non-oneof fields; oneof errors
            # are re-raised unchanged to avoid double-prefixing.
            if field and field.containing_oneof is None:
                raise ParseError('Failed to parse {0} field: {1}'.format(name, e))
            else:
                raise ParseError(str(e))
        except ValueError as e:
            raise ParseError('Failed to parse {0} field: {1}.'.format(name, e))
        except TypeError as e:
            raise ParseError('Failed to parse {0} field: {1}.'.format(name, e))
|
def _merge_a_into_b_simple ( self , a , b ) :
"""Merge config dictionary a into config dictionary b , clobbering the
options in b whenever they are also specified in a . Do not do any checking ."""
|
for k , v in a . items ( ) :
b [ k ] = v
return b
|
def load(self, file, name=None):
    """Load a file. The format name can be specified explicitly or
    inferred from the file extension."""
    if name is None:
        # Infer the format name from the file extension.
        name = self.format_from_extension(op.splitext(file)[1])
    file_format = self.file_type(name)
    # Built-in readers for the two known file types.
    if file_format == 'text':
        return _read_text(file)
    if file_format == 'json':
        return _read_json(file)
    # Otherwise fall back to the format's registered load function.
    load_function = self._formats[name].get('load', None)
    if load_function is None:
        raise IOError("The format must declare a file type or "
                      "load/save functions.")
    return load_function(file)
|
def get_workflow_id_and_project(path):
    '''
    :param path: a path or ID to a workflow object
    :type path: string
    :returns: tuple of (workflow ID, project ID)

    Returns the workflow and project IDs from the given path if
    available; otherwise, exits with an appropriate error message.
    '''
    project, _folderpath, entity_result = try_call(
        resolve_existing_path, path, expected='entity')
    try:
        is_workflow = (entity_result is not None
                       and entity_result['id'].startswith('workflow-'))
        if not is_workflow:
            raise DXCLIError('Could not resolve "' + path + '" to a workflow object')
    except:
        # err_exit reports the pending exception and terminates the process.
        err_exit()
    return entity_result['id'], project
|
def check_path(filename, reporter=modReporter.Default, settings_path=None, **setting_overrides):
    """Check the given path, printing out any warnings detected.

    :param filename: path of the Python source file to check
    :param reporter: object used to report problems; defaults to
        ``modReporter.Default``
    :param settings_path: optional path to a settings file, forwarded to
        ``check``
    :returns: the value returned by ``check`` on success, or 1 when the file
        cannot be read or decoded
    """
    try:
        # 'r' text mode performs universal-newline translation on Python 3;
        # the old 'U' flag was deprecated since 3.4 and removed in 3.11.
        with open(filename, 'r') as f:
            codestr = f.read() + '\n'
    except UnicodeError:
        reporter.unexpected_error(filename, 'problem decoding source')
        return 1
    except IOError:
        # args[1] carries the OS error message (strerror).
        msg = sys.exc_info()[1]
        reporter.unexpected_error(filename, msg.args[1])
        return 1
    return check(codestr, filename, reporter, settings_path, **setting_overrides)
|
def _poll(self):
    """
    Poll Trusted Advisor (Support) API for limit checks.

    Return a dict of service name (string) keys to nested dict vals, where
    each key is a limit name and each value the current numeric limit.

    e.g.::

        'EC2': {
            'SomeLimit': 10,
        }
    """
    logger.info("Beginning TrustedAdvisor poll")
    # _get_limit_check_id is called before the have_ta test because it is
    # what determines TA availability as a side effect.
    tmp = self._get_limit_check_id()
    if not self.have_ta:
        logger.info('TrustedAdvisor.have_ta is False; not polling TA')
        return {}
    if tmp is None:
        # NOTE(review): this path returns None while the others return {} --
        # confirm callers treat both the same.
        logger.critical("Unable to find 'Service Limits' Trusted Advisor "
                        "check; not using Trusted Advisor data.")
        return
    check_id, metadata = tmp
    checks = self._get_refreshed_check_result(check_id)
    # Fall back to the client's configured region when ta_region is unset.
    region = self.ta_region or self.conn._client_config.region_name
    res = {}
    if checks['result'].get('status', '') == 'not_available':
        logger.warning('Trusted Advisor returned status "not_available" for '
                       'service limit check; cannot retrieve limits from TA.')
        return {}
    if 'flaggedResources' not in checks['result']:
        logger.warning('Trusted Advisor returned no results for '
                       'service limit check; cannot retrieve limits from TA.')
        return {}
    for check in checks['result']['flaggedResources']:
        # Skip results reported for other regions.
        if 'region' in check and check['region'] != region:
            continue
        # metadata gives the column names for the per-check metadata values.
        data = dict(zip(metadata, check['metadata']))
        if data['Service'] not in res:
            res[data['Service']] = {}
        try:
            val = int(data['Limit Amount'])
        except ValueError:
            # Non-numeric amount: "Unlimited" is expected and kept verbatim;
            # any other string is unknown and the entry is skipped.
            val = data['Limit Amount']
            if val != 'Unlimited':
                logger.error('TrustedAdvisor returned unknown Limit '
                             'Amount %s for %s - %s', val, data['Service'],
                             data['Limit Name'])
                continue
            else:
                logger.debug('TrustedAdvisor setting explicit "Unlimited" '
                             'limit for %s - %s', data['Service'],
                             data['Limit Name'])
        res[data['Service']][data['Limit Name']] = val
    logger.info("Finished TrustedAdvisor poll")
    return res
|
def union_rectangles(R):
    """Area of union of rectangles, via a sweep line over y with a cover
    structure over the elementary x-intervals.

    :param R: list of rectangles defined by (x1, y1, x2, y2)
       where (x1, y1) is top left corner and (x2, y2) bottom right corner
    :returns: area
    :complexity: :math:`O(n^2)`
    """
    if R == []:
        return 0
    X = []   # all x coordinates; consecutive sorted values bound the elementary intervals
    Y = []   # sweep events: (y, +1 open / -1 close, rectangle index)
    for j in range(len(R)):
        (x1, y1, x2, y2) = R[j]
        assert x1 <= x2 and y1 <= y2
        X.append(x1)
        X.append(x2)
        Y.append((y1, +1, j))
        # generate events
        Y.append((y2, -1, j))
    X.sort()
    Y.sort()
    # map each x coordinate to its index in the sorted list
    X2i = {X[i]: i for i in range(len(X))}
    # widths of the elementary x-intervals
    L = [X[i + 1] - X[i] for i in range(len(X) - 1)]
    # NOTE(review): Cover_query is defined elsewhere; it appears to report the
    # total covered x-length under interval add/remove updates -- confirm.
    C = Cover_query(L)
    area = 0
    last = 0
    for (y, delta, j) in Y:
        # accumulate the strip between the previous event and this one:
        # height (y - last) times the currently covered x-length
        area += (y - last) * C.cover()
        last = y
        (x1, y1, x2, y2) = R[j]
        i = X2i[x1]
        k = X2i[x2]
        # open (delta=+1) or close (delta=-1) this rectangle's x-interval
        C.change(i, k, delta)
    return area
|
def destroy(self, request, project, pk=None):
    """Delete a bug-job-map entry.

    ``pk`` is a composite ``-``-separated key; the code parses it as
    ``<job_id>-<bug_id>`` (NOTE(review): the previous docstring described it
    as ``bug_id-job_id`` — confirm the expected order with callers).
    """
    job_id, bug_id = (int(part) for part in pk.split("-"))
    target_job = Job.objects.get(repository__name=project, id=job_id)
    BugJobMap.objects.filter(job=target_job, bug_id=bug_id).delete()
    return Response({"message": "Bug job map deleted"})
|
def scan(self):
    """Scan for bluetooth devices."""
    try:
        output = subprocess.check_output(
            ["hcitool", "scan", "--flush"], stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        raise BackendError("'hcitool scan' returned error. Make sure "
                           "your bluetooth device is powered up with "
                           "'hciconfig hciX up'.")
    # The first line is the "Scanning ..." header; each following line is
    # "\t<bdaddr>\t<name>".
    found = []
    for line in output.splitlines()[1:]:
        _, bdaddr, name = line.split(b"\t")
        found.append((bdaddr.decode("utf8"), name.decode("utf8")))
    return found
|
def character_from_structure(motivation):
    """Find a character for a given structure."""
    assert len(motivation) == 3
    # Map shorthand operators and components to their ideographic-description
    # / radical-variant equivalents before the IDS lookup.
    replacements = {
        "+": "⿰", "-": "⿱", ">": "⿱",
        "手": "扌", "人": "亻", "刀": "刂",
        "丝": "糹", "水": "氵", "0": "⿴",
    }
    structure = ''.join(replacements.get(ch, ch) for ch in motivation)
    return _cd.IDS.get(structure, '?')
|
def get_absolute_url_link(self, text=None, cls=None, icon_class=None, **attrs):
    """Gets the html link for the object.

    Falls back to ``self.get_link_text()`` when no *text* is given, then
    builds the anchor around ``self.get_absolute_url()``.
    """
    link_text = self.get_link_text() if text is None else text
    return build_link(
        href=self.get_absolute_url(),
        text=link_text,
        cls=cls,
        icon_class=icon_class,
        **attrs
    )
|
def smooth(df, window_len=11, window="hanning"):
    """Smooth the data using a window with requested size.

    A bare ``pd.Series`` is smoothed directly; anything else (a DataFrame)
    has ``_smooth`` applied column-wise via ``.apply``.
    """
    if isinstance(df, pd.Series):
        return _smooth(df, window_len=window_len, window=window)
    return df.apply(_smooth, window_len=window_len, window=window)
|
def update_statistics(Y, P, beta=0.9):
    """Update the running pairwise average activity of the output units.

    Args:
        Y: 2d array of shape (n, d) whose columns encode the activity of
            the output units.
        P: 2d array of shape (n, n) encoding the pairwise average activity
            so far.
        beta: exponential smoothing factor.

    Returns:
        The updated (n, n) average activities,
        ``beta * P + (1 - beta) * Q`` where Q is the current pairwise mean.
    """
    n, d = Y.shape
    # Broadcasted outer product per feature column: shape (n, n, d).
    pairwise = Y[:, np.newaxis, :] * Y[np.newaxis, :, :]
    assert pairwise.shape == (n, n, d)
    Q = pairwise.mean(axis=2)
    # Replace exact zeros with a tiny positive value.
    Q[Q == 0.] = 0.000001
    assert P.shape == Q.shape
    return beta * P + (1 - beta) * Q
|
def walk_and_clean(data):
    """Recursively walks list of dicts (which may themselves embed lists and dicts),
    transforming namedtuples to OrderedDicts and
    using ``clean_key_name(k)`` to make keys into SQL-safe column names

    >>> data = [{'a': 1}, [{'B': 2}, {'B': 3}], {'F': {'G': 4}}]
    >>> pprint(walk_and_clean(data))
    [OrderedDict([('a', 1)]),
     [OrderedDict([('b', 2)]), OrderedDict([('b', 3)])],
     OrderedDict([('f', OrderedDict([('g', 4)]))])]
    """
    # Namedtuples expose _fields; convert them to OrderedDicts first.
    if hasattr(data, '_fields'):
        data = OrderedDict(zip(data._fields, data))

    # Recurse into mutable mappings in place ...
    if hasattr(data, 'items') and hasattr(data, '__setitem__'):
        for key, value in data.items():
            data[key] = walk_and_clean(value)
    # ... and into sequences / iterators, materialising them as lists.
    elif (isinstance(data, (list, tuple))
            or hasattr(data, '__next__') or hasattr(data, 'next')):
        data = [walk_and_clean(element) for element in data]

    # Finally sanitise this mapping's own keys.
    if hasattr(data, 'items'):
        original_keys = data.keys()
        data = OrderedDict((clean_key_name(k), v) for (k, v) in data.items())
        # Two keys collapsing onto one cleaned name would silently lose data.
        if len(data) < len(original_keys):
            raise KeyError('Cleaning up %s created duplicates' % original_keys)
    return data
|
def task_view_user(self, ):
    """View the user that is currently selected

    Looks up the selected row of the task-user table view and, when the
    row carries an item, opens its user in the user view.

    :returns: None
    :rtype: None
    :raises: None
    """
    if not self.cur_task:
        return
    index = self.task_user_tablev.currentIndex()
    item = index.internalPointer()
    if not item:
        return
    self.view_user(item.internal_data())
|
def right_click(self, data, event_button, event_time):
    """Right click handler.

    Forwards the event to ``self.menu`` to show the context menu.
    """
    self.menu(event_button, event_time, data)
|
def sub(self, *args, **kwargs):
    """sub(other, rho=0, inplace=True)
    Subtract an *other* number instance from this one. The correlation
    coefficient *rho* may be configured per uncertainty by passing a dict.
    When *inplace* is *False*, a new instance is returned instead of
    modifying this one.
    """
    return self._apply(operator.sub, *args, **kwargs)
|
def hook_key(key, callback, suppress=False):
    """Hooks key up and key down events for a single key. Returns the event
    handler created. To remove a hooked key use `unhook_key(key)` or
    `unhook_key(handler)`.

    Note: this function shares state with hotkeys, so `clear_all_hotkeys`
    affects it as well.
    """
    _listener.start_if_necessary()
    if suppress:
        store = _listener.blocking_keys
    else:
        store = _listener.nonblocking_keys
    scan_codes = key_to_scan_codes(key)
    for code in scan_codes:
        store[code].append(callback)

    def remove_():
        # Drop every alias of this hook, then detach the callback itself.
        del _hooks[callback]
        del _hooks[key]
        del _hooks[remove_]
        for code in scan_codes:
            store[code].remove(callback)

    # Register the remover under all three handles accepted by unhook_key.
    _hooks[callback] = _hooks[key] = _hooks[remove_] = remove_
    return remove_
|
def copy(self, dst, **kwargs):
    """Copy file to a new destination.

    Returns JSON Patch with proposed change pointing to new copy.
    """
    src_fs, src_name = opener.parse(self.uri)
    dst_fs, dst_name = opener.parse(dst)
    copyfile(src_fs, src_name, dst_fs, dst_name, **kwargs)
    return [{'op': 'replace', 'path': self.pointer, 'value': dst}]
|
def _read_para_from(self, code, cbit, clen, *, desc, length, version):
    """Read HIP FROM parameter.

    Structure of HIP FROM parameter [RFC 8004]:
         0                   1                   2                   3
         0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
        |             Type              |             Length            |
        |                            Address                            |

        Octets      Bits        Name                    Description
          0           0     from.type               Parameter Type
          1          15     from.critical           Critical Bit
          2          16     from.length             Length of Contents
          4          32     from.ip                 Address

    NOTE(review): ``length`` is accepted for signature parity with the
    other parameter readers but is not used here.
    """
    # A FROM parameter always carries exactly one 128-bit IPv6 address.
    if clen != 16:
        raise ProtocolError(f'HIPv{version}: [Parano {code}] invalid format')
    raw_addr = self._read_fileng(16)
    return dict(
        type=desc,
        critical=cbit,
        length=clen,
        ip=ipaddress.ip_address(raw_addr),
    )
|
def getattrd(obj, name, default=sentinel):
    """Same as getattr(), but allows dot notation lookup.

    Source: http://stackoverflow.com/a/14324459

    :param obj: object to start the lookup on
    :param name: dotted attribute path, e.g. ``"a.b.c"``
    :param default: value returned when any segment of the path is
        missing; when omitted the ``AttributeError`` propagates
    :raises AttributeError: if the path is missing and no default was given
    """
    try:
        return functools.reduce(getattr, name.split("."), obj)
    except AttributeError:
        # Unused `as e` binding removed; re-raise preserves the traceback.
        if default is not sentinel:
            return default
        raise
|
def msg_curse(self, args=None, max_width=None):
    """Return the list of curses lines to display for the sensors view.

    :param args: parsed command-line options; ``args.fahrenheit`` selects
        Fahrenheit for temperature sensors (callers are expected to pass
        it — ``args`` itself is not None-checked below; TODO confirm).
    :param max_width: available display width; the sensor-name column is
        sized from it.
    :return: list of curses messages (empty when disabled or no stats).
    """
    # Init the return message
    ret = []
    # Only process if stats exist and display plugin enable...
    if not self.stats or self.is_disable():
        return ret
    # Fix: the default max_width=None used to crash on `max_width - 12`
    # (TypeError); return an empty message instead.
    if max_width is None:
        return ret
    # Max size for the interface name
    name_max_width = max_width - 12
    # Header
    msg = '{:{width}}'.format('SENSORS', width=name_max_width)
    ret.append(self.curse_add_line(msg, "TITLE"))
    # Stats
    for i in self.stats:
        # Do not display anything if no battery are detected
        if i['type'] == 'battery' and i['value'] == []:
            continue
        # New line
        ret.append(self.curse_new_line())
        msg = '{:{width}}'.format(i["label"][:name_max_width], width=name_max_width)
        ret.append(self.curse_add_line(msg))
        if i['value'] in (b'ERR', b'SLP', b'UNK', b'NOS'):
            # Sensor reported an error / sleep / unknown state: show it raw.
            msg = '{:>13}'.format(i['value'])
            ret.append(self.curse_add_line(
                msg, self.get_views(item=i[self.get_key()], key='value', option='decoration')))
        else:
            # Celsius -> Fahrenheit only for real temperature sensors.
            if (args.fahrenheit and i['type'] != 'battery'
                    and i['type'] != 'fan_speed'):
                value = to_fahrenheit(i['value'])
                unit = 'F'
            else:
                value = i['value']
                unit = i['unit']
            try:
                msg = '{:>13.0f}{}'.format(value, unit)
                ret.append(self.curse_add_line(
                    msg, self.get_views(item=i[self.get_key()], key='value', option='decoration')))
            except (TypeError, ValueError):
                # Non-numeric value: silently skip the value cell.
                pass
    return ret
|
def toDict(self):
    """Get information about the titles alignments as a dictionary.

    @return: A C{dict} representation of the titles aligments.
    """
    titles = {
        title: alignments.toDict()
        for title, alignments in self.items()
    }
    return {
        'scoreClass': self.scoreClass.__name__,
        'titles': titles,
    }
|
def register_recipe ( cls , recipe ) :
"""Registers a dftimewolf recipe .
Args :
recipe : imported python module representing the recipe ."""
|
recipe_name = recipe . contents [ 'name' ]
cls . _recipe_classes [ recipe_name ] = ( recipe . contents , recipe . args , recipe . __doc__ )
|
def _linux_skype_status(status, message):
    """Updates status and message for Skype IM application on Linux.

    Talks to the Skype D-Bus API; all D-Bus failures are swallowed so a
    missing or unresponsive Skype never breaks the caller.

    `status`
        Status type (key into ``SKYPE_CODE_MAP``).
    `message`
        Status message.
    """
    try:
        iface = _dbus_get_interface('com.Skype.API', '/com/Skype', 'com.Skype.API')
        if iface:
            # authenticate
            if iface.Invoke('NAME focus') != 'OK':
                msg = 'User denied authorization'
                # Fix: was `dbus.exceptions.DbusException` (nonexistent
                # attribute) — that raised AttributeError, which escaped
                # the DBusException handler below instead of being ignored.
                raise dbus.exceptions.DBusException(msg)
            iface.Invoke('PROTOCOL 5')
            # set status
            iface.Invoke('SET USERSTATUS {0}'.format(SKYPE_CODE_MAP[status]))
            # set the message, if provided
            iface.Invoke('SET PROFILE MOOD_TEXT {0}'.format(message))
    except dbus.exceptions.DBusException:
        pass
|
def common_mean_watson(Data1, Data2, NumSims=5000, print_result=True, plot='no', save=False, save_folder='.', fmt='svg'):
    """Conduct a Watson V test for a common mean on two directional data sets.

    This function calculates Watson's V statistic from input files through
    Monte Carlo simulation in order to test whether two populations of
    directional data could have been drawn from a common mean. The critical
    angle between the two sample mean directions and the corresponding
    McFadden and McElhinny (1990) classification is printed.

    Parameters
    ----------
    Data1 : a nested list of directional data [dec, inc] (a di_block)
    Data2 : a nested list of directional data [dec, inc] (a di_block)
    NumSims : number of Monte Carlo simulations (default is 5000)
    print_result : default is to print the test result (True)
    plot : the default is no plot ('no'). Putting 'yes' will plot the CDF
        from the Monte Carlo simulations.
    save : optional save of plots (default is False)
    save_folder : path to where plots will be saved (default is current)
    fmt : format of figures to be saved (default is 'svg')

    Returns
    -------
    printed text : text describing the test result is printed
    result : a boolean where 0 is fail and 1 is pass
    angle : angle between the Fisher means of the two data sets
    critical_angle : critical angle for the test to pass

    Examples
    --------
    Develop two populations of directions using ``ipmag.fishrot``. Use the
    function to determine if they share a common mean.

    >>> directions_A = ipmag.fishrot(k=20, n=30, dec=40, inc=60)
    >>> directions_B = ipmag.fishrot(k=35, n=25, dec=42, inc=57)
    >>> ipmag.common_mean_watson(directions_A, directions_B)
    """
    # Fisher statistics (dec, inc, n, r, k, ...) for each data set.
    pars_1 = pmag.fisher_mean(Data1)
    pars_2 = pmag.fisher_mean(Data2)
    # Cartesian resultant vectors of each mean, scaled by R.
    cart_1 = pmag.dir2cart([pars_1["dec"], pars_1["inc"], pars_1["r"]])
    cart_2 = pmag.dir2cart([pars_2['dec'], pars_2['inc'], pars_2["r"]])
    # Weighted sum of resultants: k1*r1 + k2*r2.
    Sw = pars_1['k'] * pars_1['r'] + pars_2['k'] * pars_2['r']
    # k1 * r1 + k2 * r2
    xhat_1 = pars_1['k'] * cart_1[0] + pars_2['k'] * cart_2[0]
    # k1 * x1 + k2 * x2
    xhat_2 = pars_1['k'] * cart_1[1] + pars_2['k'] * cart_2[1]
    # k1 * y1 + k2 * y2
    xhat_3 = pars_1['k'] * cart_1[2] + pars_2['k'] * cart_2[2]
    # k1 * z1 + k2 * z2
    # Magnitude of the combined weighted resultant.
    Rw = np.sqrt(xhat_1 ** 2 + xhat_2 ** 2 + xhat_3 ** 2)
    # Watson's V statistic for the observed data.
    V = 2 * (Sw - Rw)
    # keep weighted sum for later when determining the "critical angle"
    # let's save it as Sr (notation of McFadden and McElhinny, 1990)
    Sr = Sw
    # do monte carlo simulation of datasets with same kappas as data,
    # but a common mean
    counter = 0  # NOTE(review): unused, kept for fidelity
    Vp = []
    # set of Vs from simulations
    for k in range(NumSims):
        # get a set of N1 fisher distributed vectors with k1,
        # calculate fisher stats
        Dirp = []
        for i in range(pars_1["n"]):
            Dirp.append(pmag.fshdev(pars_1["k"]))
        pars_p1 = pmag.fisher_mean(Dirp)
        # get a set of N2 fisher distributed vectors with k2,
        # calculate fisher stats
        Dirp = []
        for i in range(pars_2["n"]):
            Dirp.append(pmag.fshdev(pars_2["k"]))
        pars_p2 = pmag.fisher_mean(Dirp)
        # get the V for these
        Vk = pmag.vfunc(pars_p1, pars_p2)
        Vp.append(Vk)
    # sort the Vs, get Vcrit (95th percentile one)
    Vp.sort()
    k = int(.95 * NumSims)
    Vcrit = Vp[k]
    # equation 18 of McFadden and McElhinny, 1990 calculates the critical
    # value of R (Rwc)
    Rwc = Sr - (old_div(Vcrit, 2))
    # following equation 19 of McFadden and McElhinny (1990) the critical
    # angle is calculated. If the observed angle (also calculated below)
    # between the data set means exceeds the critical angle the hypothesis
    # of a common mean direction may be rejected at the 95% confidence
    # level. The critical angle is simply a different way to present
    # Watson's V parameter so it makes sense to use the Watson V parameter
    # in comparison with the critical value of V for considering the test
    # results. What calculating the critical angle allows for is the
    # classification of McFadden and McElhinny (1990) to be made
    # for data sets that are consistent with sharing a common mean.
    k1 = pars_1['k']
    k2 = pars_2['k']
    R1 = pars_1['r']
    R2 = pars_2['r']
    critical_angle = np.degrees(np.arccos(old_div(((Rwc ** 2) - ((k1 * R1) ** 2) - ((k2 * R2) ** 2)), (2 * k1 * R1 * k2 * R2))))
    D1 = (pars_1['dec'], pars_1['inc'])
    D2 = (pars_2['dec'], pars_2['inc'])
    angle = pmag.angle(D1, D2)
    if print_result == True:
        print("Results of Watson V test: ")
        print("")
        print("Watson's V: " '%.1f' % (V))
        print("Critical value of V: " '%.1f' % (Vcrit))
    # NOTE(review): if V == Vcrit exactly, neither branch below runs and
    # `result` is unbound at the return -- confirm whether this edge case
    # can occur in practice before relying on the return value.
    if V < Vcrit:
        if print_result == True:
            print('"Pass": Since V is less than Vcrit, the null hypothesis')
            print('that the two populations are drawn from distributions')
            print('that share a common mean direction can not be rejected.')
        result = 1
    elif V > Vcrit:
        if print_result == True:
            print('"Fail": Since V is greater than Vcrit, the two means can')
            print('be distinguished at the 95% confidence level.')
        result = 0
    if print_result == True:
        print("")
        print("M&M1990 classification:")
        print("")
        print("Angle between data set means: " '%.1f' % (angle))
        print("Critical angle for M&M1990: " '%.1f' % (critical_angle))
    if print_result == True:
        if V > Vcrit:
            print("")
        elif V < Vcrit:
            # Classification grades by how tight the critical angle is.
            if critical_angle < 5:
                print("The McFadden and McElhinny (1990) classification for")
                print("this test is: 'A'")
            elif critical_angle < 10:
                print("The McFadden and McElhinny (1990) classification for")
                print("this test is: 'B'")
            elif critical_angle < 20:
                print("The McFadden and McElhinny (1990) classification for")
                print("this test is: 'C'")
            else:
                print("The McFadden and McElhinny (1990) classification for")
                print("this test is: 'INDETERMINATE;")
    if plot == 'yes':
        CDF = {'cdf': 1}
        # pmagplotlib.plot_init(CDF['cdf'], 5, 5)
        plt.figure(figsize=(3.5, 2.5))
        p1 = pmagplotlib.plot_cdf(CDF['cdf'], Vp, "Watson's V", 'r', "")
        p2 = pmagplotlib.plot_vs(CDF['cdf'], [V], 'g', '-')
        p3 = pmagplotlib.plot_vs(CDF['cdf'], [Vp[k]], 'b', '--')
        # pmagplotlib.draw_figs(CDF)
        if save == True:
            plt.savefig(os.path.join(save_folder, 'common_mean_watson') + '.' + fmt)
        pmagplotlib.show_fig(CDF['cdf'])
    return result, angle[0], critical_angle
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.