signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def authenticate_glance_admin(self, keystone, force_v1_client=False):
    """Return an authenticated glance client for the admin user.

    Prefers a v2 client bound to the keystone session; falls back to a
    token-based v1 client when no session exists or v1 is forced.
    """
    self.log.debug('Authenticating glance admin...')
    endpoint = keystone.service_catalog.url_for(
        service_type='image', interface='adminURL')
    if not force_v1_client and keystone.session:
        return glance_clientv2.Client("2", session=keystone.session)
    return glance_client.Client(endpoint, token=keystone.auth_token)
def stats():
    '''Runs the quotastats command, and returns the parsed output.

    CLI Example:

    .. code-block:: bash

        salt '*' quota.stats
    '''
    ret = {}
    out = __salt__['cmd.run']('quotastats').splitlines()
    for line in out:
        if not line:
            continue
        # Split on the first ': ' only, so values that themselves contain
        # the separator are preserved intact.
        comps = line.split(': ', 1)
        if len(comps) < 2:
            # Skip lines that do not look like "key: value" instead of
            # raising IndexError.
            continue
        ret[comps[0]] = comps[1]
    return ret
def get_slugignores(root, fname='.slugignore'):
    """Read the .slugignore file under ``root`` and return its patterns.

    Each returned entry is one line of the file with its trailing newline
    removed; these patterns should be removed prior to slug compilation.
    A missing file yields an empty list.
    """
    path = os.path.join(root, fname)
    try:
        with open(path) as fh:
            return [line.rstrip('\n') for line in fh]
    except IOError:
        return []
def from_xy(cls, x_array, y_array):
    """Create a dataset from two parallel arrays of data.

    The dimensions are inferred from the first element of each array.

    :param x_array: sequence of input vectors, all of the same length
    :param y_array: sequence of output vectors, all of the same length
    :raises ValueError: if either array is empty
    :return: a new dataset populated with the (x, y) pairs
    """
    # Guard both arrays: previously only x_array was checked, so an
    # empty y_array raised a bare IndexError below.
    if len(x_array) == 0 or len(y_array) == 0:
        raise ValueError("data array is empty.")
    dim_x, dim_y = len(x_array[0]), len(y_array[0])
    dataset = cls(dim_x, dim_y)
    for x, y in zip(x_array, y_array):
        assert len(x) == dim_x and len(y) == dim_y
        dataset.add_xy(x, y)
    return dataset
def copy_texture_memory_args(self, texmem_args):
    """Add texture memory arguments to the most recently compiled module.

    Only supported when the language is CUDA; any other language raises
    an Exception.
    """
    if self.lang != "CUDA":
        raise Exception("Error cannot copy texture memory arguments when language is not CUDA")
    self.dev.copy_texture_memory_args(texmem_args)
def register_list_command(self, list_func):
    """Add the 'list' command for showing projects or one project's contents.

    :param list_func: function: run when user chooses this option.
    """
    parser = self.subparsers.add_parser(
        'list',
        description="Show a list of project names or folders/files of a single project.")
    # Filtering by auth role and naming a single project are mutually exclusive.
    group = parser.add_mutually_exclusive_group(required=False)
    _add_project_filter_auth_role_arg(group)
    add_project_name_or_id_arg(group, required=False,
                               help_text_suffix="show details for")
    _add_long_format_option(parser, 'Display long format.')
    parser.set_defaults(func=list_func)
def simplify(self):
    """Reorganize the ranges in the set in order to ensure that each range
    is unique and that there is no overlap between two ranges.
    """
    # Sort the ranges first so the merge below can run in a single pass
    # (assumes IdRange sorts by first, then count — TODO confirm).
    self.__range.sort()
    new_range = []
    # Seed the merge with the first (lowest) range.
    new_first = self.__range[0].first
    new_count = self.__range[0].count
    for r in self.__range:
        if r.first == new_first:
            # Same start: keep the longest range starting at new_first
            # (sorting puts the longer one later).
            new_count = r.count
        elif r.first <= new_first + new_count:
            # Overlapping (or immediately adjacent) range.
            if new_first + new_count - 1 < r.last:
                # r extends past the current merged range: grow it.
                new_count = r.last - new_first + 1
        else:
            # No overlap: close the current merged range, start a new one.
            new_range.append(IdRange(new_first, new_count))
            new_first = r.first
            new_count = r.count
    # Close out the last merged range.
    new_range.append(IdRange(new_first, new_count))
    self.__range = new_range
def build_from_info(cls, info):
    """Build a TensorTerm instance from a dict.

    Parameters
    ----------
    cls : class
    info : dict
        contains all information needed to build the term

    Returns
    -------
    TensorTerm instance
    """
    sub_terms = [SplineTerm.build_from_info(term_info)
                 for term_info in info['terms']]
    return cls(*sub_terms)
def register(self, ModelClass, form_field=None, widget=None, title=None,
             prefix=None, has_id_value=True):
    """Register a custom model with the ``AnyUrlField``.

    Raises ValueError for duplicate models, duplicate or invalid
    prefixes, or when both a form_field and a widget are supplied.
    """
    if any(urltype.model == ModelClass for urltype in self._url_types):
        raise ValueError("Model is already registered: '{0}'".format(ModelClass))
    opts = ModelClass._meta.concrete_model._meta
    if not prefix:
        # Store something descriptive, easier to lookup from raw database content.
        prefix = '{0}.{1}'.format(opts.app_label, opts.object_name.lower())
    if not title:
        title = ModelClass._meta.verbose_name
    if self.is_external_url_prefix(prefix):
        raise ValueError("Invalid prefix value: '{0}'.".format(prefix))
    if self[prefix] is not None:
        raise ValueError("Prefix is already registered: '{0}'".format(prefix))
    if form_field is not None and widget is not None:
        raise ValueError("Provide either a form_field or widget; use the widget parameter of the form field instead.")
    urltype = UrlType(ModelClass, form_field, widget, title, prefix, has_id_value)
    # Keep the registry fresh when registered model instances change.
    signals.post_save.connect(_on_model_save, sender=ModelClass)
    self._url_types.append(urltype)
    return urltype
def _StubMethod ( self , stub , method_descriptor , rpc_controller , request , callback ) :
"""The body of all service methods in the generated stub class .
Args :
stub : Stub instance .
method _ descriptor : Descriptor of the invoked method .
rpc _ controller : Rpc controller to execute the method .
request : Request protocol message .
callback : A callback to execute when the method finishes .
Returns :
Response message ( in case of blocking call ) .""" | return stub . rpc_channel . CallMethod ( method_descriptor , rpc_controller , request , method_descriptor . output_type . _concrete_class , callback ) |
def sealedbox_encrypt(data, **kwargs):
    '''Encrypt data using a public key generated from `nacl.keygen`.
    The encrypted data can be decrypted using `nacl.sealedbox_decrypt` only with the secret key.

    CLI Examples:

    .. code-block:: bash

        salt-run nacl.sealedbox_encrypt datatoenc
        salt-call --local nacl.sealedbox_encrypt datatoenc pk_file=/etc/salt/pki/master/nacl.pub
        salt-call --local nacl.sealedbox_encrypt datatoenc pk='vrwQF7cNiNAVQVAiS3bvcbJUnF0cN6fU9YTZD9mBfzQ='
    '''
    # Forward the minion/master opts so the nacl util can resolve keys,
    # overriding any caller-supplied value.
    kwargs['opts'] = __opts__
    return salt.utils.nacl.sealedbox_encrypt(data, **kwargs)
def create_pool(self):
    """Return a ConnectionPool instance for this service's configured host.

    Connection parameters (pool size, timeout, TLS certificates and
    verification) are read from service settings via ``self.dao``.
    """
    # NOTE(review): 'service' is never used below — dead local unless
    # service_name() has side effects; confirm before removing.
    service = self.dao.service_name()
    ca_certs = self.dao.get_setting("CA_BUNDLE", "/etc/ssl/certs/ca-bundle.crt")
    cert_file = self.dao.get_service_setting("CERT_FILE", None)
    host = self.dao.get_service_setting("HOST")
    key_file = self.dao.get_service_setting("KEY_FILE", None)
    max_pool_size = int(self.dao.get_service_setting("POOL_SIZE", 10))
    socket_timeout = int(self.dao.get_service_setting("TIMEOUT", 2))
    verify_https = self.dao.get_service_setting("VERIFY_HTTPS")
    if verify_https is None:
        # Default to verifying certificates unless explicitly disabled.
        verify_https = True
    kwargs = {
        # One retry total; follow at most one redirect, no connect/read retries.
        "retries": Retry(total=1, connect=0, read=0, redirect=1),
        "timeout": socket_timeout,
        "maxsize": max_pool_size,
        "block": True,
    }
    # Client-certificate auth only when both halves are configured.
    if key_file is not None and cert_file is not None:
        kwargs["key_file"] = key_file
        kwargs["cert_file"] = cert_file
    if urlparse(host).scheme == "https":
        kwargs["ssl_version"] = self.dao.get_service_setting("SSL_VERSION", ssl.PROTOCOL_TLSv1)
        if verify_https:
            kwargs["cert_reqs"] = "CERT_REQUIRED"
            kwargs["ca_certs"] = ca_certs
    return connection_from_url(host, **kwargs)
def open_any(filename):
    """Return the opener callable appropriate for *filename*.

    Note this does not open the file: it returns ``gzip.open`` for
    ``.gz`` names, ``bz2.BZ2File`` for ``.bz2`` names, and the builtin
    ``open`` otherwise; the caller invokes the result.
    """
    for suffix, opener in ((".gz", gzip.open), (".bz2", bz2.BZ2File)):
        if filename.endswith(suffix):
            return opener
    return open
def validate(self, schema=None):
    """Validate that we have a valid object.

    On error, this will raise a `ScrapeValueError`.

    This also expects that the schemas assume that omitting required
    in the schema asserts the field is optional, not required. This is
    due to upstream schemas being in JSON Schema v3, and not validictory's
    modified syntax.
    """
    schema = self._schema if schema is None else schema
    # Teach the Draft3 type checker about the custom "datetime" type.
    def _is_datetime(checker, value):
        return isinstance(value, (datetime.date, datetime.datetime))
    type_checker = Draft3Validator.TYPE_CHECKER.redefine("datetime", _is_datetime)
    validator_cls = jsonschema.validators.extend(
        Draft3Validator, type_checker=type_checker)
    validator = validator_cls(schema, format_checker=FormatChecker())
    errors = [str(err) for err in validator.iter_errors(self.as_dict())]
    if errors:
        raise ScrapeValueError('validation of {} {} failed: {}'.format(
            self.__class__.__name__, self._id, '\n\t' + '\n\t'.join(errors)))
def numerical_components(self):
    """:return: lambda functions of each of the analytical components in
    model_dict, to be used in numerical calculation.
    """
    # Named-tuple type whose fields mirror this model's dependent variables.
    Ans = variabletuple('Ans', self.keys())
    # All components must feature the independent vars and params, that's
    # the API convention. But for those components which also contain
    # interdependence, we add those vars
    components = []
    for var, expr in self.items():
        dependencies = self.connectivity_mapping[var]
        # vars first, then params, and alphabetically within each group
        key = lambda arg: [isinstance(arg, Parameter), str(arg)]
        ordered = sorted(dependencies, key=key)
        components.append(sympy_to_py(expr, ordered))
    return Ans(*components)
def _preoptimize_model(self):
    """Preoptimize the model by first estimating a Gaussian state space model.

    Returns
    ----------
    - Gaussian model latent variable object
    """
    gaussian_model = LLT(self.data, integ=self.integ, target=self.target)
    gaussian_model.fit()
    # Seed the first two latent variables from the Gaussian fit.
    self.latent_variables.z_list[0].start = gaussian_model.latent_variables.get_z_values()[1]
    self.latent_variables.z_list[1].start = gaussian_model.latent_variables.get_z_values()[2]
    if self.model_name2 == 't':
        # Fit a Student-t density by maximum likelihood to seed the
        # scale and degrees-of-freedom starts.
        def temp_function(params):
            # params: [log(df), loc, log(scale)]
            return -np.sum(ss.t.logpdf(x=self.data, df=np.exp(params[0]),
                                       loc=np.ones(self.data.shape[0]) * params[1],
                                       scale=np.exp(params[2])))
        p = optimize.minimize(temp_function, np.array([2.0, 0.0, -1.0]), method='L-BFGS-B')
        self.latent_variables.z_list[2].start = p.x[2]
        self.latent_variables.z_list[3].start = p.x[0]
    elif self.model_name2 == 'Skewt':
        # Same idea for a skew-t density.
        def temp_function(params):
            # params: [log(df), loc, log(scale), log(gamma)]
            return -np.sum(fam.Skewt.logpdf_internal(x=self.data, df=np.exp(params[0]),
                                                     loc=np.ones(self.data.shape[0]) * params[1],
                                                     scale=np.exp(params[2]),
                                                     gamma=np.exp(params[3])))
        p = optimize.minimize(temp_function, np.array([2.0, 0.0, -1.0, 0.0]), method='L-BFGS-B')
        self.latent_variables.z_list[2].start = p.x[3]
        self.latent_variables.z_list[3].start = p.x[2]
        self.latent_variables.z_list[4].start = p.x[0]
    return gaussian_model.latent_variables
def barplot(bars, title='', upColor='blue', downColor='red'):
    """Create a candlestick plot for the given bars.

    The bars can be given as a DataFrame or as a list of bar objects.
    Returns the matplotlib figure.
    """
    import pandas as pd
    import matplotlib.pyplot as plt
    from matplotlib.lines import Line2D
    from matplotlib.patches import Rectangle

    # Normalize input into (open, high, low, close) tuples.
    if isinstance(bars, pd.DataFrame):
        ohlcTups = [tuple(v) for v in bars[['open', 'high', 'low', 'close']].values]
    elif bars and hasattr(bars[0], 'open_'):
        ohlcTups = [(b.open_, b.high, b.low, b.close) for b in bars]
    else:
        ohlcTups = [(b.open, b.high, b.low, b.close) for b in bars]

    fig, ax = plt.subplots()
    ax.set_title(title)
    ax.grid(True)
    fig.set_size_inches(10, 6)
    for n, (open_, high, low, close) in enumerate(ohlcTups):
        up = close >= open_
        color = upColor if up else downColor
        bodyHi = close if up else open_
        bodyLo = open_ if up else close
        # Lower and upper wicks.
        ax.add_line(Line2D(xdata=(n, n), ydata=(low, bodyLo),
                           color=color, linewidth=1))
        ax.add_line(Line2D(xdata=(n, n), ydata=(high, bodyHi),
                           color=color, linewidth=1))
        # Candle body.
        ax.add_patch(Rectangle(xy=(n - 0.3, bodyLo), width=0.6,
                               height=bodyHi - bodyLo, edgecolor=color,
                               facecolor=color, alpha=0.4, antialiased=True))
    ax.autoscale_view()
    return fig
def do(cmdline=None, runas=None):
    '''Execute a python command with pyenv's shims from the user or the system.

    CLI Example:

    .. code-block:: bash

        salt '*' pyenv.do 'gem list bundler'
        salt '*' pyenv.do 'gem list bundler' deploy
    '''
    path = _pyenv_path(runas)
    # Quote each word; note the concatenation keeps a leading space,
    # matching the original command string exactly.
    quoted_line = ''
    for word in cmdline.split():
        quoted_line += ' ' + _cmd_quote(word)
    result = __salt__['cmd.run_all'](
        'env PATH={0}/shims:$PATH {1}'.format(_cmd_quote(path), quoted_line),
        runas=runas, python_shell=True)
    if result['retcode'] != 0:
        return False
    # Refresh shims after a successful command.
    rehash(runas=runas)
    return result['stdout']
def device_key(self, device_key):
    """Set the device_key of this DeviceData.

    The fingerprint of the device certificate.

    :param device_key: The device_key of this DeviceData.
    :type: str
    """
    too_long = device_key is not None and len(device_key) > 512
    if too_long:
        raise ValueError("Invalid value for `device_key`, length must be less than or equal to `512`")
    self._device_key = device_key
def reject_outliers(a, threshold=3.5):
    """Iglewicz and Hoaglin's robust test for multiple outliers (two sided test).
    <http://www.itl.nist.gov/div898/handbook/eda/section3/eda35h.htm>

    See also:
    <http://contchart.com/outliers.aspx>

    >>> a = [0, 1, 2, 4, 12, 58, 188, 189]
    >>> list(reject_outliers(a))
    [False, False, False, False, False, True, True, True]
    """
    # Too few points to estimate a robust cutoff: flag nothing.
    if len(a) < 3:
        return np.zeros(len(a), dtype=bool)
    values = np.array(a, dtype=float)
    lb, ub = outlier_cutoff(values, threshold=threshold)
    return (values > ub) | (values < lb)
def _sanity_check_files ( item , files ) :
"""Ensure input files correspond with supported approaches .
Handles BAM , fastqs , plus split fastqs .""" | msg = None
file_types = set ( [ ( "bam" if x . endswith ( ".bam" ) else "fastq" ) for x in files if x ] )
if len ( file_types ) > 1 :
msg = "Found multiple file types (BAM and fastq)"
file_type = file_types . pop ( )
if file_type == "bam" :
if len ( files ) != 1 :
msg = "Expect a single BAM file input as input"
elif file_type == "fastq" :
if len ( files ) not in [ 1 , 2 ] and item [ "analysis" ] . lower ( ) != "scrna-seq" :
pair_types = set ( [ len ( xs ) for xs in fastq . combine_pairs ( files ) ] )
if len ( pair_types ) != 1 or pair_types . pop ( ) not in [ 1 , 2 ] :
msg = "Expect either 1 (single end) or 2 (paired end) fastq inputs"
if len ( files ) == 2 and files [ 0 ] == files [ 1 ] :
msg = "Expect both fastq files to not be the same"
if msg :
raise ValueError ( "%s for %s: %s" % ( msg , item . get ( "description" , "" ) , files ) ) |
def run(self, host=None, port=None, debug=None, workers=None):
    """Start the server.

    :param host: IP address to listen on (defaults to constants.SERVER_HOST)
    :param port: port to listen on (defaults to constants.SERVER_PORT)
    :param debug: whether to enable debug mode (None leaves it unchanged)
    :param workers: number of worker processes (defaults to 1)
    :return:
    """
    self._validate_cmds()
    if host is None:
        host = constants.SERVER_HOST
    if port is None:
        port = constants.SERVER_PORT
    if debug is not None:
        self.debug = debug
    workers = workers if workers is not None else 1
    logger.info('Running server on %s, debug: %s, workers: %s', (host, port), self.debug, workers)
    self._prepare_server((host, port))
    setproctitle.setproctitle(self._make_proc_name('master'))
    # Signals can only be installed from the main thread.
    self._handle_parent_proc_signals()
    self._spawn_workers(workers, self._worker_run)
def raise_check_result(self):
    """Raise an ACTIVE CHECK RESULT log entry.

    Example: "ACTIVE SERVICE CHECK: server;DOWN;HARD;1;I don't know what to say..."

    :return: None
    """
    if not self.__class__.log_active_checks:
        return
    # Map service state to a log severity; anything else is informational.
    severity_by_state = {u'WARNING': 'warning',
                         u'UNREACHABLE': 'warning',
                         u'CRITICAL': 'error'}
    log_level = severity_by_state.get(self.state, 'info')
    brok = make_monitoring_log(
        log_level,
        'ACTIVE SERVICE CHECK: %s;%s;%s;%d;%s' % (
            self.host_name, self.get_name(), self.state,
            self.attempt, self.output))
    self.broks.append(brok)
def initialize(self, runtime=None):
    """Initialize this manager.

    A manager is initialized once at the time of creation.

    arg:    runtime (osid.OsidRuntimeManager): the runtime environment
    raise:  CONFIGURATION_ERROR - an error with implementation configuration
    raise:  ILLEGAL_STATE - this manager has already been initialized by
            the OsidRuntime
    raise:  NullArgument - runtime is null
    raise:  OperationFailed - unable to complete request
    compliance: mandatory - This method must be implemented.

    implementation notes: This implementation honors only the first
    call; a second initialize() raises IllegalState.  It reads the host
    name and app key from the runtime configuration when present.
    """
    if self._runtime is not None:
        raise IllegalState()
    self._runtime = runtime
    config = runtime.get_configuration()

    def _string_param(parameter):
        # Fetch a single string-valued configuration parameter.
        return config.get_value_by_parameter(Id(parameter)).get_string_value()

    host = _string_param('parameter:hostName@dlkit_service')
    if host is not None:
        self._host = host
    app_key = _string_param('parameter:appKey@dlkit_service')
    if app_key is not None:
        self._app_key = app_key
def parse(readDataInstance):
    """Return a new L{NetMetaDataStreamEntry} object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} object with data to be parsed
        as a L{NetMetaDataStreamEntry}.

    @rtype: L{NetMetaDataStreamEntry}
    @return: A new L{NetMetaDataStreamEntry} object.
    """
    entry = NetMetaDataStreamEntry()
    # Field order matters: offset, size, then the aligned name string.
    entry.offset.value = readDataInstance.readDword()
    entry.size.value = readDataInstance.readDword()
    entry.name.value = readDataInstance.readAlignedString()
    return entry
def cache(self, con):
    """Put a dedicated connection back into the idle cache.

    Drops the connection instead when it has exceeded its maximum usage
    count or when the idle cache is full.  Decrements the open-connection
    count in every path except the already-cached short-circuit.
    """
    # Retire connections past their usage budget without taking the lock.
    if self._maxusage and con.usage_count > self._maxusage:
        self._connections -= 1
        logging.debug('dropping connection %s uses past max usage %s' % (con.usage_count, self._maxusage))
        con._close()
        return
    self._condition.acquire()
    if con in self._idle_cache:
        # called via socket close on a connection in the idle cache
        self._condition.release()
        return
    try:
        if not self._maxcached or len(self._idle_cache) < self._maxcached:
            # the idle cache is not full, so put it there
            self._idle_cache.append(con)
        else:
            # if the idle cache is already full, drop the connection
            logging.debug('dropping connection. connection pool (%s) is full. maxcached %s' % (len(self._idle_cache), self._maxcached))
            con._close()
        # Wake one waiter: a cached connection or free slot is now available.
        self._condition.notify()
    finally:
        self._connections -= 1
        self._condition.release()
def _config2indy ( self , config : dict ) -> dict :
"""Given a configuration dict with indy and possibly more configuration values , return the
corresponding indy wallet configuration dict from current default and input values .
: param config : input configuration
: return : configuration dict for indy wallet""" | assert { 'name' , 'id' } & { k for k in config }
return { 'id' : config . get ( 'name' , config . get ( 'id' ) ) , 'storage_type' : config . get ( 'storage_type' , self . default_storage_type ) , 'freshness_time' : config . get ( 'freshness_time' , self . default_freshness_time ) } |
def nacm_rule_list_rule_access_operations ( self , ** kwargs ) :
"""Auto Generated Code""" | config = ET . Element ( "config" )
nacm = ET . SubElement ( config , "nacm" , xmlns = "urn:ietf:params:xml:ns:yang:ietf-netconf-acm" )
rule_list = ET . SubElement ( nacm , "rule-list" )
name_key = ET . SubElement ( rule_list , "name" )
name_key . text = kwargs . pop ( 'name' )
rule = ET . SubElement ( rule_list , "rule" )
name_key = ET . SubElement ( rule , "name" )
name_key . text = kwargs . pop ( 'name' )
access_operations = ET . SubElement ( rule , "access-operations" )
access_operations . text = kwargs . pop ( 'access_operations' )
callback = kwargs . pop ( 'callback' , self . _callback )
return callback ( config ) |
def string_relax(start, end, V, n_images=25, dr=None, h=3.0, k=0.17,
                 min_iter=100, max_iter=10000, max_tol=5e-6):
    """Implements path relaxation via the elastic band method.

    In general, the method is to define a path by a set of points (images)
    connected with bands with some elasticity constant k.  The images then
    relax along the forces found in the potential field V, counterbalanced
    by the elastic response of the elastic band.  In general the endpoints
    of the band can be allowed to relax also to their local minima, but in
    this calculation they are kept fixed.

    Args:
        start, end: Endpoints of the path calculation given in discrete
            coordinates with respect to the grid in V
        V: potential field through which to calculate the path
        n_images: number of images used to define the path.  In general
            anywhere from 20 to 40 seems to be good.
        dr: Conversion ratio from discrete coordinates to real coordinates
            for each of the three coordinate vectors
        h: Step size for the relaxation.  h = 0.1 works reliably, but is
            slow.  h = 10 diverges with large gradients but for the types
            of gradients seen in CHGCARs, works pretty reliably
        k: Elastic constant for the band (in real units, not discrete)
        min_iter, max_iter: Number of optimization steps the string will
            take before exiting (even if unconverged)
        max_tol: Convergence threshold such that if the string moves by
            less than max_tol in a step, and at least min_iter steps have
            passed, the algorithm will terminate.  Depends strongly on the
            size of the gradients in V, but 5e-6 works reasonably well for
            CHGCARs.
    """
    # This code is based on the MATLAB example provided by
    # Prof. Eric Vanden-Eijnden of NYU (http://www.cims.nyu.edu/~eve2/main.htm)

    # Set parameters: default dr assumes V spans a unit cell.
    if not dr:
        dr = np.array([1.0 / V.shape[0], 1.0 / V.shape[1], 1.0 / V.shape[2]])
    else:
        dr = np.array(dr, dtype=float)
    keff = k * dr * n_images
    h0 = h
    # Initialize string as a straight line from start to end.
    g1 = np.linspace(0, 1, n_images)
    s0 = start
    s1 = end
    s = np.array([g * (s1 - s0) for g in g1]) + s0
    ds = s - np.roll(s, 1, axis=0)
    # Zero out the wrap-around difference introduced by np.roll.
    ds[0] = (ds[0] - ds[0])
    ls = np.cumsum(la.norm(ds, axis=1))
    ls = ls / ls[-1]
    # Reparametrize to equal arc-length spacing.
    fi = interp1d(ls, s, axis=0)
    s = fi(g1)
    # Evaluate initial distances (for elastic equilibrium)
    ds0_plus = s - np.roll(s, 1, axis=0)
    ds0_minus = s - np.roll(s, -1, axis=0)
    ds0_plus[0] = (ds0_plus[0] - ds0_plus[0])
    ds0_minus[-1] = (ds0_minus[-1] - ds0_minus[-1])
    # Evaluate potential gradient outside the loop, as potential does not
    # change per step in this approximation.
    dV = np.gradient(V)
    # Evolve string
    for step in range(0, max_iter):
        if step > min_iter:
            # Gradually decay step size to prevent oscillations
            h = h0 * np.exp(-2.0 * (step - min_iter) / max_iter)
        else:
            h = h0
        # Calculate forces acting on string: sample the gradient at each
        # image with periodic wrap-around indexing.
        d = V.shape
        s0 = s
        # NOTE(review): all three components divide by dr[0]; dr[1] and
        # dr[2] look intended for the y/z components — confirm upstream.
        edV = np.array([[dV[0][int(pt[0]) % d[0]][int(pt[1]) % d[1]][int(pt[2]) % d[2]] / dr[0],
                         dV[1][int(pt[0]) % d[0]][int(pt[1]) % d[1]][int(pt[2]) % d[2]] / dr[0],
                         dV[2][int(pt[0]) % d[0]][int(pt[1]) % d[1]][int(pt[2]) % d[2]] / dr[0]]
                        for pt in s])
        # Update according to force due to potential and string elasticity
        ds_plus = s - np.roll(s, 1, axis=0)
        ds_minus = s - np.roll(s, -1, axis=0)
        ds_plus[0] = (ds_plus[0] - ds_plus[0])
        ds_minus[-1] = (ds_minus[-1] - ds_minus[-1])
        Fpot = edV
        # NOTE(review): la.norm without axis= returns one scalar for the
        # whole array rather than per-image norms — confirm intended.
        Fel = keff * (la.norm(ds_plus) - la.norm(ds0_plus)) * (ds_plus / la.norm(ds_plus))
        Fel += keff * (la.norm(ds_minus) - la.norm(ds0_minus)) * (ds_minus / la.norm(ds_minus))
        s -= h * (Fpot + Fel)
        # Fix endpoints
        s[0] = s0[0]
        s[-1] = s0[-1]
        # Reparametrize string to equal arc-length spacing each step.
        ds = s - np.roll(s, 1, axis=0)
        ds[0] = (ds[0] - ds[0])
        ls = np.cumsum(la.norm(ds, axis=1))
        ls = ls / ls[-1]
        fi = interp1d(ls, s, axis=0)
        s = fi(g1)
        # Mean per-image displacement in real units, normalized by h.
        tol = la.norm((s - s0) * dr) / n_images / h
        if tol > 1e10:
            raise ValueError("Pathfinding failed, path diverged! Consider reducing h to "
                             "avoid divergence.")
        if step > min_iter and tol < max_tol:
            print("Converged at step {}".format(step))
            break
        if step % 100 == 0:
            print("Step {} - ds = {}".format(step, tol))
    return s
def validate_headers(self):
    """Check if CSV metadata files have the right format.

    Runs the base validation first, then checks each metadata CSV's
    header row; exercise CSVs are only checked when exercises exist.
    """
    super().validate()
    self.validate_header(self.channeldir, self.channelinfo, CHANNEL_INFO_HEADER)
    self.validate_header(self.channeldir, self.contentinfo, CONTENT_INFO_HEADER)
    if self.has_exercises():
        self.validate_header(self.channeldir, self.exercisesinfo, EXERCISE_INFO_HEADER)
        self.validate_header(self.channeldir, self.questionsinfo, EXERCISE_QUESTIONS_INFO_HEADER)
def _qualified_key ( self , key ) :
"""Prepends the configured prefix to the key ( if applicable ) .
: param key : The unprefixed key .
: return : The key with any configured prefix prepended .""" | pfx = self . key_prefix if self . key_prefix is not None else ''
return '{}{}' . format ( pfx , key ) |
def _is_junction ( arg ) :
'''Return True , if arg is a junction statement .''' | return isinstance ( arg , dict ) and len ( arg ) == 1 and next ( six . iterkeys ( arg ) ) == 'junction' |
def is_docstring(tokens, previous_logical):
    """Return found docstring.

    'A docstring is a string literal that occurs as the first statement in a
    module, function, class,'
    http://www.python.org/dev/peps/pep-0257/#what-is-a-docstring
    """
    # Scan to the first non-INDENT token; it must be a STRING to qualify.
    for token_type, text, start, _, _ in tokens:
        if token_type == tokenize.STRING:
            break
        elif token_type != tokenize.INDENT:
            return False
    else:
        # Loop exhausted without finding a STRING token.
        return False
    line = text.lstrip()
    # 'start' is rebound here: now the offset of the triple quote in the line.
    start, start_triple = _find_first_of(line, START_DOCSTRING_TRIPLE)
    if (previous_logical.startswith("def ") or previous_logical.startswith("class ")):
        if start == 0:
            return text
    # NOTE(review): falls through returning None (falsy) when the previous
    # logical line is not a def/class or the triple quote is not at offset 0.
def check_accesspoints(sess):
    """Check the status of all connected access points."""
    names = walk_data(sess, name_ap_oid, helper)[0]
    operationals = walk_data(sess, operational_ap_oid, helper)[0]
    availabilities = walk_data(sess, availability_ap_oid, helper)[0]
    alarms = walk_data(sess, alarm_ap_oid, helper)[0]
    # ap_ip = walk_data(sess, ip_ap_oid, helper)  # no result
    helper.add_summary("Access Points Status")
    for ap_name, ap_operational, ap_availability, ap_alarm in zip(
            names, operationals, availabilities, alarms):
        # Record every AP's full state in the long output.
        helper.add_long_output("%s - Operational: %s - Availabilty: %s - Alarm: %s" % (
            ap_name, operational_states[int(ap_operational)],
            availability_states[int(ap_availability)],
            alarm_states[int(ap_alarm)]))
        # Operational state: anything but 1 or 4 is critical.
        if ap_operational not in ("1", "4"):
            helper.status(critical)
            helper.add_summary("%s Operational State: %s" % (
                ap_name, operational_states[int(ap_operational)]))
        # Availability state: anything but 3 is critical.
        if ap_availability != "3":
            helper.status(critical)
            helper.add_summary("%s Availability State: %s" % (
                ap_name, availability_states[int(ap_availability)]))
        # Alarm state: 2 warns; 3 or 4 is critical.
        if ap_alarm == "2":
            helper.status(warning)
            helper.add_summary("%s Controller Alarm State: %s" % (
                ap_name, alarm_states[int(ap_alarm)]))
        if ap_alarm in ("3", "4"):
            helper.status(critical)
            helper.add_summary("%s Controller Alarm State: %s" % (
                ap_name, alarm_states[int(ap_alarm)]))
def gi(ip, lo=None, iq=None, ico=None, pl=None):
    """This function is a wrapper for
    :meth:`~pywbem.WBEMConnection.GetInstance`.

    Retrieve an instance.

    Parameters:

      ip (:class:`~pywbem.CIMInstanceName`):
        Instance path.  If this object does not specify a namespace, the
        default namespace of the connection is used.  Its `host`
        attribute will be ignored.

      lo (:class:`py:bool`):
        LocalOnly flag: Exclude inherited properties.  `None` will cause
        the server default of `True` to be used.  Deprecated in
        :term:`DSP0200`: WBEM server implementations for `True` may
        vary; this parameter should be set to `False` by the caller.

      iq (:class:`py:bool`):
        IncludeQualifiers flag: Include qualifiers.  `None` will cause
        the server default of `False` to be used.  Deprecated in
        :term:`DSP0200`: Clients cannot rely on qualifiers to be
        returned in this operation.

      ico (:class:`py:bool`):
        IncludeClassOrigin flag: Include class origin information for
        the properties in the retrieved instance.  `None` will cause the
        server default of `False` to be used.  Deprecated in
        :term:`DSP0200`: WBEM servers may either implement this
        parameter as specified, or may treat any specified value as
        `False`.

      pl (:term:`string` or :term:`py:iterable` of :term:`string`):
        PropertyList: Names of properties to be included (if not
        otherwise excluded).  An empty iterable indicates to include no
        properties.  If `None`, all properties will be included.

    Returns:

      :class:`~pywbem.CIMInstance`:
        The instance, with its `path` attribute being a
        :class:`~pywbem.CIMInstanceName` object with its attributes set
        as follows:

        * `classname`: Name of the creation class of the instance.
        * `keybindings`: Keybindings of the instance.
        * `namespace`: Name of the CIM namespace containing the instance.
        * `host`: `None`, indicating the WBEM server is unspecified.
    """
    # Thin pass-through to the module-level connection.
    return CONN.GetInstance(ip, LocalOnly=lo, IncludeQualifiers=iq, IncludeClassOrigin=ico, PropertyList=pl)
def load_and_run_pipeline(pipeline_name, pipeline_context_input=None, working_dir=None, context=None, parse_input=True, loader=None):
    """Load and run the specified pypyr pipeline.

    Call this (not main()) when running a pipeline from within another
    pipeline. With the default file loader, pipeline_name.yaml is expected
    in working_dir/pipelines/.

    Args:
        pipeline_name (str): Name of pipeline, sans .yaml at end.
        pipeline_context_input (str): Initialize the pypyr context with
            this string.
        working_dir (path): Look for pipelines and modules here. Ignored
            in favor of context.working_dir when context is given.
        context (pypyr.context.Context): Existing Context to re-use and
            mutate, e.g. when a pipeline invokes a child pipeline.
        parse_input (bool): Run the context_parser in the pipeline.
        loader (str): Absolute name of pipeline loader module. Defaults
            to pypyr.pypeloaders.fileloader.

    Returns:
        None
    """
    logger.debug(f"you asked to run pipeline: {pipeline_name}")

    if not loader:
        loader = 'pypyr.pypeloaders.fileloader'
        logger.debug(f"use default pype loader: {loader}")
    else:
        logger.debug(f"you set the pype loader to: {loader}")

    logger.debug(f"you set the initial context to: {pipeline_context_input}")

    if context is None:
        context = pypyr.context.Context()
        context.working_dir = working_dir
    else:
        working_dir = context.working_dir

    # Load deliberately outside try/except: if the pipeline itself cannot
    # be found there is no failure-handler that could run, so a load error
    # is a fatal stop.
    loader_module = pypyr.moduleloader.get_module(loader)
    try:
        get_pipeline_definition = getattr(loader_module, 'get_pipeline_definition')
    except AttributeError:
        logger.error(
            f"The pipeline loader {loader_module} doesn't have a "
            "get_pipeline_definition(pipeline_name, working_dir) function."
        )
        raise

    logger.debug(f"loading the pipeline definition with {loader_module}")
    pipeline_definition = get_pipeline_definition(
        pipeline_name=pipeline_name, working_dir=working_dir)
    logger.debug(f"{loader_module} done")

    run_pipeline(
        pipeline=pipeline_definition,
        pipeline_context_input=pipeline_context_input,
        context=context,
        parse_input=parse_input,
    )
def delete_data(self, url, *args, **kwargs):
    """Delete data under the provided url.

    Args:
        **url**: address of file to be deleted

        **additional_headers**: (optional) additional headers to be used
        with the request

    Returns:
        Boolean. True if the server answered 200 or 202, False otherwise.

    .. versionadded:: 0.3.2
    """
    response = self._conn.delete(url, headers=self._prepare_headers(**kwargs))
    return response.status_code in (200, 202)
def smartResizeColumnsToContents(self):
    """Resize columns to their contents without letting any column become
    narrower than its current width (preserves manual enlargements)."""
    self.blockSignals(True)
    self.setUpdatesEnabled(False)
    hdr = self.header()
    hdr.blockSignals(True)

    col_range = range(self.columnCount())
    # Remember current widths before letting Qt shrink-to-fit.
    previous = [self.columnWidth(c) for c in col_range]
    hdr.resizeSections(hdr.ResizeToContents)

    # Restore any column that the resize made narrower than before.
    for col in col_range:
        if self.columnWidth(col) < previous[col]:
            self.setColumnWidth(col, previous[col])

    hdr.blockSignals(False)
    self.setUpdatesEnabled(True)
    self.blockSignals(False)
def read_item(self, from_date=None):
    """Read items and yield them one by one.

    :param from_date: start date for incremental reading.
    :return: next single item when any available.
    :raises ValueError: `metadata__timestamp` field not found in index
    :raises NotFoundError: index not found in ElasticSearch
    """
    query = self._build_search_query(from_date)
    hits = helpers.scan(self._es_conn, query, scroll='300m',
                        index=self._es_index, preserve_order=True)
    yield from hits
def omegac(self, R):
    """NAME:
       omegac
    PURPOSE:
       calculate the circular angular speed at R in potential Pot
    INPUT:
       R - Galactocentric radius (can be Quantity)
    OUTPUT:
       circular angular speed, sqrt(-F_R / R)
    HISTORY:
       2011-10-09 - Written - Bovy (IAS)
    """
    radial_force = self.Rforce(R, use_physical=False)
    return nu.sqrt(-radial_force / R)
def get_current_observation_date(self):
    """Get the date at the middle of the current observation, computed
    from the MJD-OBS and EXPTIME header keywords.

    @return: Time (in MPC string form)
    """
    # All HDU elements share the same date/time, so just use the last
    # one; in MEF files the first HDU sometimes lacks the header.
    hdr = self.get_current_cutout().hdulist[-1].header
    start = Time(float(hdr.get('MJD-OBS')),
                 format='mjd', scale='utc',
                 precision=config.read('MPC.DATE_PRECISION'))
    # Shift to the midpoint of the exposure.
    midpoint = start + TimeDelta(float(hdr.get('EXPTIME')) * units.second) / 2.0
    return midpoint.mpc
def has_unknown_attachment_error(self, page_id):
    """Check whether a page renders the unknown-attachment placeholder.

    :param page_id:
    :return: the page's tiny-link URL when the placeholder is present,
             empty string otherwise.
    """
    placeholder = 'plugins/servlet/confluence/placeholder/unknown-attachment'
    page = self.get_page_by_id(page_id, expand='body.view')
    if len(page) == 0:
        return ""
    rendered = (((page.get('body') or {}).get('view') or {}).get('value') or {})
    if placeholder in rendered:
        links = page.get('_links')
        return links.get('base') + links.get('tinyui')
    return ""
def solve(guess_a, guess_b, power, solver='scipy'):
    """Construct a pyneqsys.symbolic.SymbolicSys instance and return the
    result of its ``solve`` method."""
    # The problem is 2-dimensional, so we need 2 symbols, plus the
    # user-specified integer parameter ``p``.
    unknowns = sp.symbols('x:2', real=True)
    p = sp.Symbol('p', real=True, negative=False, integer=True)
    u0, u1 = unknowns
    # The two coupled non-linear equations of the system.
    equations = [
        u0 + (u0 - u1) ** p / 2 - 1,
        (u1 - u0) ** p / 2 + u1,
    ]
    # SymbolicSys derives the Jacobian symbolically from the expressions.
    system = SymbolicSys(unknowns, equations, [p])
    return system.solve([guess_a, guess_b], [power], solver=solver)
def raw_cron(user):
    '''Return the contents of the user's crontab

    CLI Example:

    .. code-block:: bash

        salt '*' cron.raw_cron root
    '''
    # When we already run as the target uid (or on Solaris/AIX, whose
    # crontab has no -u flag) run plain ``crontab -l`` as that user;
    # otherwise request the user's crontab explicitly with -u.
    if _check_instance_uid_match(user) or __grains__.get('os_family') in ('Solaris', 'AIX'):
        cmd = 'crontab -l'
        stdout = __salt__['cmd.run_stdout'](
            cmd, runas=user, ignore_retcode=True, rstrip=False, python_shell=False)
    else:
        cmd = 'crontab -u {0} -l'.format(user)
        stdout = __salt__['cmd.run_stdout'](
            cmd, ignore_retcode=True, rstrip=False, python_shell=False)
    # splitlines(True) preserves the line endings.
    lines = salt.utils.data.decode(stdout).splitlines(True)
    # Strip the 3-line "DO NOT EDIT" banner some crons prepend.
    if lines and lines[0].startswith('# DO NOT EDIT THIS FILE - edit the master and reinstall.'):
        del lines[0:3]
    return ''.join(lines)
def convenience_calc_probs(self, params):
    """Calculate the probabilities of the chosen alternative, and the
    long-format probabilities, for this model and dataset."""
    shapes, intercepts, betas = self.convenience_split_params(params)
    args = (betas,
            self.design_3d,
            self.alt_id_vector,
            self.rows_to_obs,
            self.rows_to_alts,
            self.utility_transform)
    kwargs = {"chosen_row_to_obs": self.chosen_row_to_obs,
              "return_long_probs": True}
    return general_calc_probabilities(*args, **kwargs)
def all(cls, klass, db_session=None):
    """Return all objects of a specific type; works correctly with
    sqlalchemy inheritance models. Kept for backwards compatibility --
    you should normally use the model's base_query() instead.

    :param klass:
    :param db_session:
    :return:
    """
    session = get_db_session(db_session)
    return session.query(klass)
def ConfigureLogging(debug_output=False, filename=None, mode='w', quiet_mode=False):
    """Configures the logging root logger.

    Args:
      debug_output (Optional[bool]): True if the logging should include debug
          output.
      filename (Optional[str]): log filename.
      mode (Optional[str]): log file access mode.
      quiet_mode (Optional[bool]): True if the logging should not include
          information output. Note that debug_output takes precedence over
          quiet_mode.
    """
    # Remove all possible log handlers. The log handlers cannot be
    # reconfigured and therefore must be recreated. Iterate over a copy:
    # removeHandler() mutates the handlers list, and removing while
    # iterating the live list would skip every other handler.
    for handler in list(logging.root.handlers):
        logging.root.removeHandler(handler)

    logger = logging.getLogger()

    if filename and filename.endswith('.gz'):
        # Compressed log file support (project-provided handler).
        handler = CompressedFileHandler(filename, mode=mode)
    elif filename:
        handler = logging.FileHandler(filename, mode=mode)
    else:
        handler = logging.StreamHandler()

    format_string = (
        '%(asctime)s [%(levelname)s] (%(processName)-10s) PID:%(process)d '
        '<%(module)s> %(message)s')
    handler.setFormatter(logging.Formatter(format_string))

    # debug_output takes precedence over quiet_mode.
    if debug_output:
        level = logging.DEBUG
    elif quiet_mode:
        level = logging.WARNING
    else:
        level = logging.INFO

    logger.setLevel(level)
    handler.setLevel(level)
    logger.addHandler(handler)
def to_list(var):
    """Check if the given value is a list; try to convert it if it is not.

    ``None`` becomes an empty list, a string is split on newlines, and any
    other non-list value is passed to ``list()``.

    :raises ValueError: when the value cannot be converted to a list.
    """
    if var is None:
        return []
    if isinstance(var, list):
        return var
    if isinstance(var, str):
        return var.split('\n')
    try:
        return list(var)
    except TypeError:
        raise ValueError("{} cannot be converted to the list.".format(var))
def tokenize(self, string):
    """Tokenize the incoming string with the word tokenizer matching
    ``self.language``, falling back to the NLTK tokenizer."""
    # Lambdas defer name lookup until the chosen tokenizer is actually
    # called, mirroring the lazy resolution of the original if/elif chain.
    tokenizers = {
        'akkadian': lambda s: tokenize_akkadian_words(s),
        'arabic': lambda s: tokenize_arabic_words(s),
        'french': lambda s: tokenize_french_words(s),
        'greek': lambda s: tokenize_greek_words(s),
        'latin': lambda s: tokenize_latin_words(s),
        'old_norse': lambda s: tokenize_old_norse_words(s),
        'middle_english': lambda s: tokenize_middle_english_words(s),
        'middle_high_german': lambda s: tokenize_middle_high_german_words(s),
    }
    tokenizer = tokenizers.get(self.language, lambda s: nltk_tokenize_words(s))
    return tokenizer(string)
def main(output_path=None):
    """Write out the generated CircleCI build config and Docker files.

    CircleCI supports neither build matrices nor parameterisation of cache
    paths, so config.yml is generated here (with the bonus of validating
    the yaml as we go). Docker and docker-compose files are also templated
    per python version, since Docker ``FROM`` statements cannot be
    parameterised via environment variables or similar.

    The bulk of the config structure is parsed templated yaml. The main
    job blocks (build, test, deploy) are expanded as the product of python
    versions and mbed cloud environments, then recombined into the job
    listing and chained into a CircleCI workflow using a graph (nodes are
    job identifiers, edges describe dependencies and extra parameters).
    """
    target = output_path or os.path.join(PROJECT_ROOT, '.circleci', 'config.yml')
    structure = generate_circle_output()
    with open(target, 'w') as fh:
        yaml_content = yaml.safe_dump(data=structure, default_flow_style=False)
        fh.write(
            f'#\n'
            f'# This file is autogenerated, do not modify manually. '
            f'See {author_file} for instructions.\n'
            f'#\n'
            f'{yaml_content}'
        )
    # Docker / docker-compose files, one set per python version.
    for path, content in generate_docker_targets().items():
        LOG.info('writing %s', path)
        with open(path, 'w') as fh:
            fh.write(content)
def observed_data_to_xarray(self):
    """Convert observed data to xarray."""
    # emcee3 keeps the posterior args on ``log_prob_fn``; emcee2 keeps
    # them directly on the sampler.
    if hasattr(self.sampler, "log_prob_fn"):
        args = self.sampler.log_prob_fn.args
    else:
        args = self.sampler.args
    data = {name: args[idx] for idx, name in enumerate(self.arg_names)}
    return dict_to_dataset(data, library=self.emcee, coords=self.coords, dims=self.dims)
def to_marker(marker):
    """Serialize a marker to its string id.

    :param marker: Marker instance or string id to serialize.
    :return: string id
    :raises SbgError: when marker is missing or of an unexpected type.
    """
    from sevenbridges.models.marker import Marker
    if not marker:
        raise SbgError('Marker is required!')
    if isinstance(marker, Marker):
        return marker.id
    if isinstance(marker, six.string_types):
        return marker
    raise SbgError('Invalid marker parameter!')
def do_ams_auth(endpoint, body):
    '''Acquire Media Services Authentication Token.

    Args:
        endpoint (str): Azure Media Services Initial Endpoint.
        body (str): A Content Body.

    Returns:
        HTTP response. JSON body.
    '''
    return requests.post(
        endpoint,
        data=body,
        headers={"content-type": "application/x-www-form-urlencoded",
                 "Accept": json_acceptformat},
    )
def resolvePublic(self, pubID):
    """Try to look up the catalog-local reference associated with a
    public ID in this catalog."""
    return libxml2mod.xmlACatalogResolvePublic(self._o, pubID)
def teardown_appcontext(self, func: Callable) -> Callable:
    """Add a teardown app (context) function.

    This is designed to be used as a decorator. An example usage,

    .. code-block:: python

        @app.teardown_appcontext
        def func():
            ...

    Arguments:
        func: The teardown function itself.
    """
    # Wrap sync functions so everything stored is awaitable.
    self.teardown_appcontext_funcs.append(ensure_coroutine(func))
    return func
def request(self, session_id):
    """Force the termination of a NETCONF session (not the current one!).

    *session_id* is the session identifier of the NETCONF session to be
    terminated, as a string.
    """
    kill = new_ele("kill-session")
    sub_ele(kill, "session-id").text = session_id
    return self._request(kill)
def _interfaces_removed(self, object_path, interfaces):
    """Internal method: update object state when D-Bus interfaces vanish
    and fire the corresponding device events."""
    # Snapshot the state before dropping the removed interfaces.
    before = copy(self._objects[object_path])
    for interface in interfaces:
        del self._objects[object_path][interface]
    after = self._objects[object_path]

    if Interface['Drive'] in interfaces:
        # Losing the Drive interface can mean the media was removed.
        self._detect_toggle(
            'has_media',
            self.get(object_path, before),
            self.get(object_path, after),
            None, 'media_removed')

    if Interface['Block'] in interfaces:
        # A removed cleartext block device means its LUKS slave got locked.
        slave = self.get(object_path, before).luks_cleartext_slave
        if slave:
            if not self._has_job(slave.object_path, 'device_locked'):
                self.trigger('device_locked', slave)

    if self._objects[object_path]:
        self.trigger('device_changed',
                     self.get(object_path, before),
                     self.get(object_path, after))
    else:
        # No interfaces left: the object itself is gone.
        del self._objects[object_path]
        if object_kind(object_path) in ('device', 'drive'):
            self.trigger('device_removed', self.get(object_path, before))
def checkIfHashIsCracked(hash=None):
    """Check whether the given hash is stored on the md5db.net website.

    :param hash: hash to verify.
    :return: the resolved plaintext as returned by the API; an empty list
        if nothing was found or the service could not be reached.
    """
    apiURL = "http://md5db.net/api/" + str(hash).lower()
    try:
        # Query md5db.net for the hash.
        response = urllib2.urlopen(apiURL)
        try:
            return response.read()
        finally:
            response.close()
    except urllib2.URLError:
        # Only network/HTTP failures mean "no information found". The
        # original bare ``except`` also swallowed KeyboardInterrupt and
        # programming errors; those now propagate.
        return []
def detect_intent_stream(project_id, session_id, audio_file_path, language_code):
    """Returns the result of detect intent with streaming audio as input.

    Using the same `session_id` between requests allows continuation
    of the conversation.
    """
    import dialogflow_v2 as dialogflow
    session_client = dialogflow.SessionsClient()

    # Note: hard coding audio_encoding and sample_rate_hertz for simplicity.
    audio_encoding = dialogflow.enums.AudioEncoding.AUDIO_ENCODING_LINEAR_16
    sample_rate_hertz = 16000

    session_path = session_client.session_path(project_id, session_id)
    print('Session path: {}\n'.format(session_path))

    def request_generator(audio_config, audio_file_path):
        # Generator of streaming requests: config first, then audio chunks.
        query_input = dialogflow.types.QueryInput(audio_config=audio_config)

        # The first request contains the configuration.
        yield dialogflow.types.StreamingDetectIntentRequest(
            session=session_path, query_input=query_input)

        # Here we are reading small chunks of audio data from a local
        # audio file. In practice these chunks should come from
        # an audio input device.
        with open(audio_file_path, 'rb') as audio_file:
            while True:
                chunk = audio_file.read(4096)
                if not chunk:
                    break
                # The later requests contain audio data.
                yield dialogflow.types.StreamingDetectIntentRequest(
                    input_audio=chunk)

    audio_config = dialogflow.types.InputAudioConfig(
        audio_encoding=audio_encoding, language_code=language_code,
        sample_rate_hertz=sample_rate_hertz)

    requests = request_generator(audio_config, audio_file_path)
    responses = session_client.streaming_detect_intent(requests)

    print('=' * 20)
    for response in responses:
        print('Intermediate transcript: "{}".'.format(
            response.recognition_result.transcript))

    # Note: The result from the last response is the final transcript along
    # with the detected content.
    # NOTE(review): this assumes at least one response was received; with an
    # empty stream ``response`` would be unbound -- confirm with the API.
    query_result = response.query_result

    print('=' * 20)
    print('Query text: {}'.format(query_result.query_text))
    print('Detected intent: {} (confidence: {})\n'.format(
        query_result.intent.display_name,
        query_result.intent_detection_confidence))
    print('Fulfillment text: {}\n'.format(query_result.fulfillment_text))
def decimals(self):
    """Return the number of decimal places for the values in the
    `value<N>` attributes of the current mode (refreshing the cached
    attribute handle as a side effect)."""
    cached, value = self.get_attr_int(self._decimals, 'decimals')
    self._decimals = cached
    return value
def pictures(self):
    """list[Picture]: List of embedded pictures."""
    wanted = Picture.code
    return [blk for blk in self.metadata_blocks if blk.code == wanted]
def main(argv=None):
    """psd-tools command line utility.

    Usage:
        psd-tools export <input_file> <output_file> [options]
        psd-tools show <input_file> [options]
        psd-tools debug <input_file> [options]
        psd-tools -h | --help
        psd-tools --version

    Options:
        -v --verbose                Be more verbose.

    Example:
        psd-tools show example.psd  # Show the file content
        psd-tools export example.psd example.png  # Export as PNG
        psd-tools export example.psd[0] example-0.png  # Export layer as PNG
    """
    # docopt parses this function's docstring as the CLI grammar.
    args = docopt.docopt(main.__doc__, version=__version__, argv=argv)

    logger.setLevel(logging.DEBUG if args['--verbose'] else logging.INFO)

    if args['export']:
        # "<file>[i][j]" selects nested layers by index.
        parts = args['<input_file>'].split('[')
        input_file = parts[0]
        indices = [int(token.rstrip(']')) for token in parts[1:]]
        layer = PSDImage.open(input_file)
        for index in indices:
            layer = layer[index]
        if isinstance(layer, PSDImage) and layer.has_preview():
            image = layer.topil()
        else:
            image = layer.compose()
        image.save(args['<output_file>'])
    elif args['show']:
        pprint(PSDImage.open(args['<input_file>']))
    elif args['debug']:
        pprint(PSDImage.open(args['<input_file>'])._record)
def get_random_choice(self):
    """Return a random name from the class's data."""
    return random.choice(self.dat)['name']
def getBindings(varList, startLevel=0):
    """Given a list of identifiers (as strings), construct the dictionary
    mapping each identifier to its value, looked up through the bindings
    at every level of the call stack (locals first, then globals, walking
    outward from the caller).

    :param varList: iterable of identifier names to resolve.
    :param startLevel: how many extra frames above the caller to start at.
    :return: (bindings, unbound) where bindings is the dict of found
        bindings and unbound is the set of identifiers never found.
    """
    varsToFind = set(varList)
    bindings = {}
    # Start at the frame of getBindings' caller (plus startLevel).
    frame = inspect.currentframe()
    try:
        for _ in range(startLevel + 1):
            frame = frame.f_back
    except AttributeError:
        # frame became None before we walked far enough up the stack.
        raise Exception("bindings: startLevel {} is too high\n".format(startLevel))
    # Walk outward through the stack until everything is found.
    while frame and varsToFind:
        frameLocals = frame.f_locals
        localKeys = varsToFind.intersection(frameLocals)
        for v in localKeys:
            bindings[v] = frameLocals[v]
        varsToFind -= localKeys

        frameGlobals = frame.f_globals
        globalKeys = varsToFind.intersection(frameGlobals)
        for v in globalKeys:
            bindings[v] = frameGlobals[v]
        # BUG FIX: the original subtracted localKeys again here, so names
        # resolved from globals stayed in the unbound set forever.
        varsToFind -= globalKeys

        frame = frame.f_back
    return (bindings, varsToFind)
def top_priority_effect(effects):
    """Given a collection of variant transcript effects, return the top
    priority object.

    ExonicSpliceSite variants require special treatment since they actually
    represent two effects -- the splicing modification and whatever else
    would happen to the exonic sequence if nothing else gets changed. In
    cases where multiple transcripts give rise to multiple effects, a
    variety of filtering and sorting heuristics picks the canonical
    transcript.

    :raises ValueError: when ``effects`` is empty.
    """
    if len(effects) == 0:
        raise ValueError("List of effects cannot be empty")

    effects = map(select_between_exonic_splice_site_and_alternate_effect, effects)
    by_gene = apply_groupby(
        effects, fn=gene_id_of_associated_transcript, skip_none=False)
    geneless = by_gene.pop(None) if None in by_gene else []

    if len(by_gene) > 0:
        # Prefer effects that are associated with genes.
        candidates = [
            top_priority_effect_for_single_gene(gene_effects)
            for gene_effects in by_gene.values()
        ]
        return max(candidates, key=multi_gene_effect_sort_key)

    # All effects were without genes: choose the best among those.
    assert len(geneless) > 0
    return max(geneless, key=multi_gene_effect_sort_key)
def _compile_create_encoding ( self , sql , connection , blueprint ) :
"""Append the character set specifications to a command .
: type sql : str
: type connection : orator . connections . Connection
: type blueprint : Blueprint
: rtype : str""" | charset = blueprint . charset or connection . get_config ( "charset" )
if charset :
sql += " DEFAULT CHARACTER SET %s" % charset
collation = blueprint . collation or connection . get_config ( "collation" )
if collation :
sql += " COLLATE %s" % collation
return sql |
def last_ehlo_response(self, response: SMTPResponse) -> None:
    """When setting the last EHLO response, parse the message for the
    supported extensions and auth methods."""
    extensions, auth_methods = parse_esmtp_extensions(response.message)
    self._last_ehlo_response = response
    self.esmtp_extensions = extensions
    self.server_auth_methods = auth_methods
    # Receiving any EHLO response at all means the server speaks ESMTP.
    self.supports_esmtp = True
def DataClass(name, columns, constraint=None):
    """Use DataClass to define a class, but with some extra features:

    1. restrict the datatype of property
    2. restrict if `required`, or if `nulls` are allowed
    3. generic constraints on object properties

    It is expected that this class become a real class (or be removed) in
    the long term because it is expensive to use and should only be good
    for verifying program correctness, not user input.

    :param name: Name of the class we are creating
    :param columns: Each columns[i] has properties {
        "name",     - (required) name of the property
        "required", - False if it must be defined (even if None)
        "nulls",    - True if property can be None, or missing
        "default",  - A default value, if none is provided
        "type"      - a Python datatype
    }
    :param constraint: a JSON query Expression for extra constraints
        (return true if all constraints are met)
    :return: The class that has been created
    """
    # Normalize bare column names into full column descriptors.
    columns = wrap([{"name": c, "required": True, "nulls": False, "type": object} if is_text(c) else c for c in columns])
    slots = columns.name
    required = wrap(filter(lambda c: c.required and not c.nulls and not c.default, columns)).name
    nulls = wrap(filter(lambda c: c.nulls, columns)).name
    defaults = {c.name: coalesce(c.default, None) for c in columns}
    types = {c.name: coalesce(c.jx_type, object) for c in columns}

    # The class body is generated as source text and exec'd below; the
    # {{...}} placeholders are filled in by expand_template. The odd
    # "{"+"{missed}}" constructions keep literal moustache markers for the
    # Log.error templating out of expand_template's reach.
    code = expand_template(
        """
from __future__ import unicode_literals
from mo_future import is_text, is_binary
from collections import Mapping

meta = None
types_ = {{types}}
defaults_ = {{defaults}}

class {{class_name}}(Mapping):
    __slots__ = {{slots}}

    def _constraint(row, rownum, rows):
        try:
            return {{constraint_expr}}
        except Exception as e:
            return False

    def __init__(self, **kwargs):
        if not kwargs:
            return
        for s in {{slots}}:
            object.__setattr__(self, s, kwargs.get(s, {{defaults}}.get(s, None)))
        missed = {{required}}-set(kwargs.keys())
        if missed:
            Log.error("Expecting properties {"+"{missed}}", missed=missed)
        illegal = set(kwargs.keys())-set({{slots}})
        if illegal:
            Log.error("{"+"{names}} are not a valid properties", names=illegal)
        if not self._constraint(0, [self]):
            Log.error("constraint not satisfied {"+"{expect}}\\n{"+"{value|indent}}", expect={{constraint}}, value=self)

    def __getitem__(self, item):
        return getattr(self, item)

    def __setitem__(self, item, value):
        setattr(self, item, value)
        return self

    def __setattr__(self, item, value):
        if item not in {{slots}}:
            Log.error("{"+"{item|quote}} not valid attribute", item=item)
        object.__setattr__(self, item, value)
        if not self._constraint(0, [self]):
            Log.error("constraint not satisfied {"+"{expect}}\\n{"+"{value|indent}}", expect={{constraint}}, value=self)

    def __getattr__(self, item):
        Log.error("{"+"{item|quote}} not valid attribute", item=item)

    def __hash__(self):
        return object.__hash__(self)

    def __eq__(self, other):
        if isinstance(other, {{class_name}}) and dict(self)==dict(other) and self is not other:
            Log.error("expecting to be same object")
        return self is other

    def __dict__(self):
        return {k: getattr(self, k) for k in {{slots}}}

    def items(self):
        return ((k, getattr(self, k)) for k in {{slots}})

    def __copy__(self):
        _set = object.__setattr__
        output = object.__new__({{class_name}})
        {{assign}}
        return output

    def __iter__(self):
        return {{slots}}.__iter__()

    def __len__(self):
        return {{len_slots}}

    def __str__(self):
        return str({{dict}})
""",
        {
            "class_name": name,
            "slots": "(" + (", ".join(quote(s) for s in slots)) + ")",
            "required": "{" + (", ".join(quote(s) for s in required)) + "}",
            "nulls": "{" + (", ".join(quote(s) for s in nulls)) + "}",
            "defaults": Literal(defaults).to_python(),
            "len_slots": len(slots),
            "dict": "{" + (", ".join(quote(s) + ": self." + s for s in slots)) + "}",
            "assign": "; ".join("_set(output, " + quote(s) + ", self." + s + ")" for s in slots),
            "types": "{" + (",".join(quote(k) + ": " + v.__name__ for k, v in types.items())) + "}",
            "constraint_expr": Python[jx_expression(constraint)].to_python(),
            "constraint": value2json(constraint),
        },
    )
    # Compile the generated source and register the resulting class.
    output = _exec(code, name)
    register_data(output)
    return output
def get_random_word(dictionary, starting_letter=None):
    """Return a random word from *dictionary*, optionally constrained to
    one starting with *starting_letter* (a random letter is picked when
    none is given).

    :raises NoWordForLetter: when no word starts with the given letter.
    """
    if starting_letter is None:
        starting_letter = random.choice(list(dictionary.keys()))
    try:
        return random.choice(dictionary[starting_letter])
    except KeyError:
        raise NoWordForLetter(
            "Dictionary does not contain a word starting with '{}'".format(starting_letter))
def check_map(uri, url_root):
    """Return a tuple of (rule, kw) matching *uri* against the dynamic
    routes stored in the database; (None, None) on database errors,
    (None, {}) when nothing matches."""
    # TODO: building the Map each time this is called could be made more
    # efficient.
    try:
        rows = db.execute(
            text(fetch_query_string('select_route_where_dynamic.sql'))).fetchall()
    except OperationalError as err:
        current_app.logger.error("OperationalError: %s", err)
        return (None, None)

    if rows:
        adapter = Map(
            [Rule(row['rule'], endpoint='dynamic') for row in rows]).bind(url_root)
        try:
            (rule, rule_kw) = adapter.match(path_info=uri, return_rule=True)
            return (str(rule), rule_kw)
        except HTTPException:
            pass
    return (None, {})
def _get_repr_list(self):
    """Build the representation lines common to all HDU types.

    Returns (text, spacing) where text is a list of lines and spacing is
    the indent prefix used on each line.
    """
    spacing = ' ' * 2
    lines = [
        '',
        "%sfile: %s" % (spacing, self._filename),
        # hdunum is 1-based; report the 0-based extension number.
        "%sextension: %d" % (spacing, self._info['hdunum'] - 1),
        "%stype: %s" % (spacing, _hdu_type_map[self._info['hdutype']]),
    ]
    extname = self.get_extname()
    if extname != "":
        lines.append("%sextname: %s" % (spacing, extname))
    extver = self.get_extver()
    if extver != 0:
        lines.append("%sextver: %s" % (spacing, extver))
    return lines, spacing
def import_wikipage(self, slug, content, **attrs):
    """Import a Wiki page and return a :class:`WikiPage` object.

    :param slug: slug of the :class:`WikiPage`
    :param content: content of the :class:`WikiPage`
    :param attrs: optional attributes for :class:`Task`
    """
    pages = WikiPages(self.requester)
    return pages.import_(self.id, slug, content, **attrs)
def _get_synset_offsets ( synset_idxes ) :
"""Returs pointer offset in the WordNet file for every synset index .
Notes
Internal function . Do not call directly .
Preserves order - - for [ x , y , z ] returns [ offset ( x ) , offset ( y ) , offset ( z ) ] .
Parameters
synset _ idxes : list of ints
Lists synset IDs , which need offset .
Returns
list of ints
Lists pointer offsets in Wordnet file .""" | offsets = { }
current_seeked_offset_idx = 0
ordered_synset_idxes = sorted ( synset_idxes )
with codecs . open ( _SOI , 'rb' , 'utf-8' ) as fin :
for line in fin :
split_line = line . split ( ':' )
while current_seeked_offset_idx < len ( ordered_synset_idxes ) and split_line [ 0 ] == str ( ordered_synset_idxes [ current_seeked_offset_idx ] ) : # Looping on single line entries in case synset _ indexes contains duplicates .
offsets [ synset_idxes [ current_seeked_offset_idx ] ] = int ( split_line [ 1 ] )
current_seeked_offset_idx += 1
if current_seeked_offset_idx >= len ( synset_idxes ) :
break
return [ offsets [ synset_idx ] for synset_idx in synset_idxes ] |
def do_eb(self, arg):
    """[~process] eb <address> <data> - write the data to the specified address"""
    # TODO: data parameter should be optional, use a child Cmd here
    pid = self.get_process_id_from_prefix()
    tokens = self.split_tokens(arg, 2)
    # First token is the target address, the remainder is hex-encoded data.
    target_address = self.input_address(tokens[0], pid)
    payload = HexInput.hexadecimal(' '.join(tokens[1:]))
    self.write_memory(target_address, payload, pid)
def l3_tenant_id(cls):
    """Return (and lazily cache) the id of the tenant owning hosting device resources."""
    # Serve the cached value when we already resolved it.
    if cls._l3_tenant_uuid is not None:
        return cls._l3_tenant_uuid
    # TODO(sridar): hack for now to determine if keystone v3 API is to be used.
    if hasattr(cfg.CONF.keystone_authtoken, 'project_domain_id'):
        cls._l3_tenant_uuid = cls._get_tenant_id_using_keystone_v3()
    else:
        cls._l3_tenant_uuid = cls._get_tenant_id_using_keystone_v2()
    return cls._l3_tenant_uuid
def reqFundamentalData(self, contract: Contract, reportType: str, fundamentalDataOptions: List[TagValue] = None) -> str:
    """Get fundamental data of a contract in XML format.

    This method is blocking.

    https://interactivebrokers.github.io/tws-api/fundamentals.html

    Args:
        contract: Contract to query.
        reportType:
            * 'ReportsFinSummary': Financial summary
            * 'ReportsOwnership': Company's ownership
            * 'ReportSnapshot': Company's financial overview
            * 'ReportsFinStatements': Financial Statements
            * 'RESC': Analyst Estimates
            * 'CalendarReport': Company's calendar
        fundamentalDataOptions: Unknown
    """
    # Build the coroutine, then block on it via the event-loop runner.
    request = self.reqFundamentalDataAsync(contract, reportType, fundamentalDataOptions)
    return self._run(request)
def get_program_status_brok(self, brok_type='program_status'):
    """Create a program status brok.

    Initially builds the running properties and then, if initial status brok,
    get the properties from the Config class where an entry exists for the brok
    'full_status'.

    :param brok_type: type tag placed on the emitted brok (default 'program_status')
    :return: Brok with program status data
    :rtype: alignak.brok.Brok
    """
    # Get the running statistics
    data = {"is_running": True, "instance_id": self.instance_id,
            # "alignak_name": self.alignak_name,
            "instance_name": self.name, "last_alive": time.time(), "pid": os.getpid(),
            '_running': self.get_scheduler_stats(details=True), '_config': {}, '_macros': {}}
    # Get configuration data from the pushed configuration
    cls = self.pushed_conf.__class__
    for prop, entry in list(cls.properties.items()):
        # Is this property intended for broking?
        if 'full_status' not in entry.fill_brok:
            continue
        data['_config'][prop] = self.pushed_conf.get_property_value_for_brok(prop, cls.properties)
        # data['_config'][prop] = getattr(self.pushed_conf, prop, entry.default)
    # Get the macros from the pushed configuration and try to resolve
    # the macros to provide the result in the status brok
    macro_resolver = MacroResolver()
    macro_resolver.init(self.pushed_conf)
    for macro_name in sorted(self.pushed_conf.macros):
        # Resolve each "$NAME$" macro with no host/service context.
        data['_macros'][macro_name] = macro_resolver.resolve_simple_macros_in_string(
            "$%s$" % macro_name, [], None, None)
    logger.debug("Program status brok %s data: %s", brok_type, data)
    return Brok({'type': brok_type, 'data': data})
def png(self, val, ng='ERR'):
    """Print ``val: ERR`` in red on STDOUT."""
    message = '{}: {}'.format(val, ng)
    self.pstd(self.color.red(message))
def log_env_info():
    """Log information about the execution environment.

    Collects PyTorch's pretty environment report (library versions, CUDA,
    OS details) and writes it to the root logger at INFO level.
    """
    logging.info('Collecting environment information...')
    env_info = torch.utils.collect_env.get_pretty_env_info()
    # env_info is already a string; the previous f-string wrapper was a no-op.
    logging.info(env_info)
def _set_attribute ( self , name , value ) :
"""Make sure namespace gets updated when setting attributes .""" | setattr ( self , name , value )
self . namespace . update ( { name : getattr ( self , name ) } ) |
def read_moc_ascii(moc, filename=None, file=None):
    """Read from an ASCII file into a MOC.

    Either a filename, or an open file object can be specified.
    """
    # Prefer an already-open file object; otherwise open the named file.
    if file is None:
        with open(filename, 'r') as f:
            orders = _read_ascii(f)
    else:
        orders = _read_ascii(file)
    # Each entry looks like "order/c1,c2,lo-hi,...".
    for text in orders:
        if not text:
            continue
        (order, ranges) = text.split('/')
        cells = []
        for piece in ranges.split(','):
            try:
                cells.append(int(piece))
            except ValueError:
                # Not a single integer: must be an inclusive "lo-hi" range.
                (lo, hi) = piece.split('-')
                cells.extend(range(int(lo), int(hi) + 1))
        moc.add(order, cells)
def main(args):
    '''surface_to_rubbon.main(args) can be given a list of arguments, such as sys.argv[1:]; these
    arguments may include any options and must include exactly one subject id and one output
    filename. Additionally one or two surface input filenames must be given. The surface files are
    projected into the ribbon and written to the output filename. For more information see the
    string stored in surface_to_image.info.
    '''
    # Parse the arguments
    (args, opts) = _surface_to_ribbon_parser(args)
    # First, help?
    if opts['help']:
        print(info, file=sys.stdout)
        return 1
    # and if we are verbose, lets setup a note function
    verbose = opts['verbose']
    def note(s):
        # Prints only when --verbose was requested; returns the verbose flag.
        if verbose:
            print(s, file=sys.stdout)
        return verbose
    # Add the subjects directory, if there is one
    if 'subjects_dir' in opts and opts['subjects_dir'] is not None:
        add_subject_path(opts['subjects_dir'])
    # figure out our arguments:
    (lhfl, rhfl) = (opts['lh_file'], opts['rh_file'])
    if len(args) == 0:
        raise ValueError('Not enough arguments provided!')
    elif len(args) == 1:
        # must be that the subject is in the env?
        sub = find_subject_path(os.getenv('SUBJECT'))
        outfl = args[0]
    elif len(args) == 2:
        # Either (subject, out) or (surface, out) with subject from $SUBJECT.
        sbpth = find_subject_path(args[0])
        if sbpth is not None:
            sub = sbpth
        else:
            sub = find_subject_path(os.getenv('SUBJECT'))
            if lhfl is not None:
                rhfl = args[0]
            elif rhfl is not None:
                lhfl = args[0]
            else:
                raise ValueError('Given arg is not a subject: %s' % args[0])
        outfl = args[1]
    elif len(args) == 3:
        # One of the first two args may be the subject; the other is a surface.
        sbpth0 = find_subject_path(args[0])
        sbpth1 = find_subject_path(args[1])
        if sbpth0 is not None:
            sub = sbpth0
            if lhfl is not None:
                rhfl = args[1]
            elif rhfl is not None:
                lhfl = args[1]
            else:
                raise ValueError('Too many arguments given: %s' % args[1])
        elif sbpth1 is not None:
            sub = sbpth1
            if lhfl is not None:
                rhfl = args[0]
            elif rhfl is not None:
                lhfl = args[0]
            else:
                raise ValueError('Too many arguments given: %s' % args[0])
        else:
            sub = find_subject_path(os.getenv('SUBJECT'))
            if lhfl is not None or rhfl is not None:
                raise ValueError('Too many arguments and no subject given')
            # NOTE(review): args holds 3 elements at this point, so this 2-way
            # unpack raises ValueError at runtime; it likely should be
            # args[:2] -- confirm against upstream before relying on it.
            (lhfl, rhfl) = args
        outfl = args[2]
    elif len(args) == 4:
        # Expect (subject, lh, rh, out) in some order; locate the subject arg.
        if lhfl is not None or rhfl is not None:
            raise ValueError('Too many arguments and no subject given')
        subidx = next((i for (i, a) in enumerate(args) if find_subject_path(a) is not None), None)
        if subidx is None:
            raise ValueError('No subject given')
        sub = find_subject_path(args[subidx])
        del args[subidx]
        (lhfl, rhfl, outfl) = args
    else:
        raise ValueError('Too many arguments provided!')
    if sub is None:
        raise ValueError('No subject specified or found in $SUBJECT')
    if lhfl is None and rhfl is None:
        raise ValueError('No surfaces provided')
    # check the method
    method = opts['method'].lower()
    if method not in ['linear', 'lines', 'nearest', 'auto']:
        raise ValueError('Unsupported method: %s' % method)
    # and the datatype
    if opts['dtype'] is None:
        dtyp = None
    elif opts['dtype'].lower() == 'float':
        dtyp = np.float32
    elif opts['dtype'].lower() == 'int':
        dtyp = np.int32
    else:
        raise ValueError('Type argument must be float or int')
    # 'auto' picks the interpolation style from the requested dtype.
    if method == 'auto':
        if dtyp is np.float32:
            method = 'linear'
        elif dtyp is np.int32:
            method = 'nearest'
        else:
            method = 'linear'
    # Now, load the data:
    note('Reading surfaces...')
    (lhdat, rhdat) = (None, None)
    if lhfl is not None:
        note(' - Reading LH file: %s' % lhfl)
        lhdat = read_surf_file(lhfl)
    if rhfl is not None:
        note(' - Reading RH file: %s' % rhfl)
        rhdat = read_surf_file(rhfl)
    # hemi stays None when both hemispheres were supplied.
    (dat, hemi) = (rhdat, 'rh') if lhdat is None else \
                  (lhdat, 'lh') if rhdat is None else \
                  ((lhdat, rhdat), None)
    sub = subject(sub)
    # okay, make the volume...
    note('Generating volume...')
    vol = sub.cortex_to_image(dat, hemi=hemi, method=method, fill=opts['fill'], dtype=dtyp)
    # and write out the file
    note('Exporting volume file: %s' % outfl)
    save(outfl, vol, affine=sub.voxel_to_native_matrix)
    note('surface_to_image complete!')
    return 0
def feed_data(self, data: bytes) -> None:
    """Proxy for the underlying parser's feed_data.

    Forwards *data* to ``self._parser`` when one is attached; silently a
    no-op otherwise.
    """
    if self._parser is not None:
        self._parser.feed_data(data)
def align(expnums, ccd, version='s', dry_run=False):
    """Create a 'shifts' file that transforms the space/flux/time scale of all images to the first image.

    This function relies on the .fwhm, .trans.jmp, .phot and .zeropoint.used files for inputs.
    The scaling we are computing here is for use in planting sources into the image at the same
    sky/flux locations while accounting for motions of sources with time.

    :param expnums: list of MegaPrime exposure numbers to add artificial KBOs to,
                    the first frame in the list is the reference.
    :param ccd: which ccd to work on.
    :param version: Add sources to the 'o', 'p' or 's' images
    :param dry_run: don't push results to VOSpace.
    """
    # Get the images and supporting files that we need from the VOSpace area
    # get_image and get_file check if the image/file is already on disk.
    # re-computed fluxes from the PSF stars and then recompute x/y/flux scaling.
    # some dictionaries to hold the various scale
    pos = {}
    apcor = {}
    mags = {}
    zmag = {}
    mjdates = {}
    for expnum in expnums:
        # Per-exposure calibration inputs: image, zeropoint, observation MJD,
        # and aperture-correction terms.
        filename = storage.get_image(expnum, ccd=ccd, version=version)
        zmag[expnum] = storage.get_zeropoint(expnum, ccd, prefix=None, version=version)
        mjdates[expnum] = float(fits.open(filename)[0].header.get('MJD-OBS'))
        apcor[expnum] = [float(x) for x in
                         open(storage.get_file(expnum, ccd=ccd, version=version,
                                               ext=storage.APCOR_EXT)).read().split()]
        keys = ['crval1', 'cd1_1', 'cd1_2', 'crval2', 'cd2_1', 'cd2_2']
        # load the .trans.jmp values into a 'wcs' like dictionary.
        # .trans.jmp maps current frame to reference frame in pixel coordinates.
        # the reference frame of all the frames supplied must be the same.
        shifts = dict(zip(keys, [float(x) for x in
                                 open(storage.get_file(expnum, ccd=ccd, version=version,
                                                       ext='trans.jmp')).read().split()]))
        shifts['crpix1'] = 0.0
        shifts['crpix2'] = 0.0
        # now create a wcs object based on those transforms, this wcs links the current frame's
        # pixel coordinates to the reference frame's pixel coordinates.
        w = get_wcs(shifts)
        # get the PHOT file that was produced by the mkpsf routine
        logging.debug("Reading .phot file {}".format(expnum))
        phot = ascii.read(storage.get_file(expnum, ccd=ccd, version=version, ext='phot'),
                          format='daophot')
        # compute the small-aperture magnitudes of the stars used in the PSF
        import daophot
        logging.debug("Running phot on {}".format(filename))
        mags[expnum] = daophot.phot(filename, phot['XCENTER'], phot['YCENTER'],
                                    aperture=apcor[expnum][0],
                                    sky=apcor[expnum][1] + 1,
                                    swidth=apcor[expnum][0],
                                    zmag=zmag[expnum])
        # convert the x/y positions to positions in Frame 1 based on the trans.jmp values.
        logging.debug("Doing the XY translation to refrence frame: {}".format(w))
        (x, y) = w.wcs_pix2world(mags[expnum]["XCENTER"], mags[expnum]["YCENTER"], 1)
        pos[expnum] = numpy.transpose([x, y])
        # match this exposures PSF stars position against those in the first image of the set.
        logging.debug("Matching lists")
        idx1, idx2 = util.match_lists(pos[expnums[0]], pos[expnum])
        # compute the magnitude offset between the current frame and the reference.
        dmags = numpy.ma.array(mags[expnums[0]]["MAG"] - apcor[expnums[0]][2] -
                               (mags[expnum]["MAG"][idx1] - apcor[expnum][2]),
                               mask=idx1.mask)
        dmags.sort()
        logging.debug("Computed dmags between input and reference: {}".format(dmags))
        # NOTE(review): error_count below is pure debug scaffolding -- it is
        # incremented and logged but never used in any decision.
        error_count = 0
        error_count += 1
        logging.debug("{}".format(error_count))
        # compute the median and determine if that shift is small compared to the scatter.
        try:
            midx = int(numpy.sum(numpy.any([~dmags.mask], axis=0)) / 2.0)
            dmag = float(dmags[midx])
            logging.debug("Computed a mag delta of: {}".format(dmag))
        except Exception as e:
            logging.error(str(e))
            logging.error("Failed to compute mag offset between plant and found using: {}".format(dmags))
            # Sentinel magnitude offset signalling a failed measurement.
            dmag = 99.99
            error_count += 1
            logging.debug("{}".format(error_count))
        try:
            # Warn when the median offset exceeds 3 sigma of the scatter.
            if math.fabs(dmag) > 3 * (dmags.std() + 0.01):
                logging.warning("Magnitude shift {} between {} and {} is large: {}".format(
                    dmag, expnums[0], expnum, shifts))
        except Exception as e:
            logging.error(str(e))
        error_count += 1
        logging.debug("{}".format(error_count))
        # Record the flux/time offsets alongside the spatial transform.
        shifts['dmag'] = dmag
        shifts['emag'] = dmags.std()
        shifts['nmag'] = len(dmags.mask) - dmags.mask.sum()
        shifts['dmjd'] = mjdates[expnums[0]] - mjdates[expnum]
        shift_file = os.path.basename(storage.get_uri(expnum, ccd, version, '.shifts'))
        error_count += 1
        logging.debug("{}".format(error_count))
        try:
            # Persist the shifts dictionary as pretty-printed JSON.
            fh = open(shift_file, 'w')
            fh.write(json.dumps(shifts, sort_keys=True, indent=4, separators=(',', ': ')))
            fh.write('\n')
            fh.close()
        except Exception as e:
            logging.error("Creation of SHIFTS file failed while trying to write: {}".format(shifts))
            raise e
        error_count += 1
        logging.debug("{}".format(error_count))
        # Push the result to VOSpace unless this is a dry run.
        if not dry_run:
            storage.copy(shift_file, storage.get_uri(expnum, ccd, version, '.shifts'))
def handle_other(self, response):
    """Handles all responses with the exception of 401s.

    This is necessary so that we can authenticate responses if requested.

    :param response: the HTTP response object to (possibly) mutually authenticate
    :return: the response, possibly sanitized when mutual auth could not complete
    :raises MutualAuthenticationError: when mutual authentication is wanted but fails
    """
    log.debug("handle_other(): Handling: %d" % response.status_code)
    # Only verify the server when mutual auth was requested and this
    # exchange has not already been authenticated.
    if self.mutual_authentication in (REQUIRED, OPTIONAL) and not self.auth_done:
        is_http_error = response.status_code >= 400
        if _negotiate_value(response) is not None:
            # The server supplied a negotiate token: verify it.
            log.debug("handle_other(): Authenticating the server")
            if not self.authenticate_server(response):
                # Mutual authentication failure when mutual auth is wanted,
                # raise an exception so the user doesn't use an untrusted
                # response.
                log.error("handle_other(): Mutual authentication failed")
                raise MutualAuthenticationError("Unable to authenticate "
                                                "{0}".format(response))
            # Authentication successful
            log.debug("handle_other(): returning {0}".format(response))
            self.auth_done = True
            return response
        elif is_http_error or self.mutual_authentication == OPTIONAL:
            # No negotiate header: tolerated for error responses, or when
            # mutual authentication is merely OPTIONAL.
            if not response.ok:
                log.error("handle_other(): Mutual authentication unavailable "
                          "on {0} response".format(response.status_code))
            if (self.mutual_authentication == REQUIRED and
                    self.sanitize_mutual_error_response):
                # Strip potentially sensitive content from the
                # unauthenticated error response before returning it.
                return SanitizedResponse(response)
            else:
                return response
        else:
            # Unable to attempt mutual authentication when mutual auth is
            # required, raise an exception so the user doesn't use an
            # untrusted response.
            log.error("handle_other(): Mutual authentication failed")
            raise MutualAuthenticationError("Unable to authenticate "
                                            "{0}".format(response))
    else:
        log.debug("handle_other(): returning {0}".format(response))
        return response
def _calculate_num_queries ( self ) :
"""Calculate the total number of request and response queries .
Used for count header and count table .""" | request_totals = self . _totals ( "request" )
response_totals = self . _totals ( "response" )
return request_totals [ 2 ] + response_totals [ 2 ] |
def getItemDPList(self):
    """Return (item, dp) pairs matching the listview contents.

    Not-yet-saved items have dp = None.
    """
    pairs = []
    for entry in self.iterator():
        pairs.append((entry, entry._dp))
    return pairs
def init_account(self):
    """Setup a new GitHub account."""
    # Fetch the authenticated GitHub user via the API client.
    ghuser = self.api.me()
    # Setup local access tokens to be used by the webhooks
    hook_token = ProviderToken.create_personal(
        'github-webhook',
        self.user_id,
        scopes=['webhooks:event'],
        is_internal=True,
    )
    # Initial structure of extra data
    self.account.extra_data = dict(
        id=ghuser.id,
        login=ghuser.login,
        name=ghuser.name,
        tokens=dict(webhook=hook_token.id),
        repos=dict(),
        last_sync=iso_utcnow(),
    )
    db.session.add(self.account)
    # Sync data from GitHub, but don't check repository hooks yet.
    self.sync(hooks=False)
def digit(nstr, schema):
    """!~~digit

    Return True when *nstr* is an int or a string of decimal digits,
    e.g. '0123456789'.isdigit() or 123456789; False for any other type
    or content. *schema* is accepted for interface compatibility but unused.
    """
    if isinstance(nstr, int):
        nstr = str(nstr)
    elif not isinstance(nstr, str):
        # `basestring` only exists on Python 2; on Python 3 the old check
        # raised NameError for any non-int, non-string input. `str` is the
        # correct Python 3 equivalent here.
        return False
    return nstr.isdigit()
def _unwrap_obj ( self , fobj , fun ) :
"""Unwrap decorators .""" | try :
prev_func_obj , next_func_obj = ( fobj . f_globals [ fun ] , getattr ( fobj . f_globals [ fun ] , "__wrapped__" , None ) , )
while next_func_obj :
prev_func_obj , next_func_obj = ( next_func_obj , getattr ( next_func_obj , "__wrapped__" , None ) , )
return ( prev_func_obj , inspect . getfile ( prev_func_obj ) . replace ( ".pyc" , "py" ) )
except ( KeyError , AttributeError , TypeError ) : # KeyErrror : fun not in fobj . f _ globals
# AttributeError : fobj . f _ globals does not have
# a _ _ wrapped _ _ attribute
# TypeError : pref _ func _ obj does not have a file associated with it
return None , None |
def is_contiguous(self):
    '''Return whether entire collection is contiguous.'''
    # Walk the indexes once, checking each consecutive pair differs by 1.
    index_iter = iter(self.indexes)
    try:
        prev = next(index_iter)
    except StopIteration:
        # Empty collections are trivially contiguous.
        return True
    for current in index_iter:
        if current != prev + 1:
            return False
        prev = current
    return True
def inspect_current_object(self):
    """Inspect current object in the Help plugin."""
    editor = self.get_current_editor()
    # NOTE(review): this connects the signal on every invocation; repeated
    # calls may stack duplicate connections -- confirm whether a unique
    # connection (or a prior disconnect) is intended.
    editor.sig_display_signature.connect(self.display_signature_help)
    # Request hover/help information at the current cursor position.
    line, col = editor.get_cursor_line_column()
    editor.request_hover(line, col)
def _parse_downloaded_items(self, result, camera, path):
    """Parse downloaded videos.

    Iterates the clip metadata in *result*, filters by camera name and
    deletion flag, and streams each new clip into *path* as an .mp4 file.
    Existing files are never overwritten.
    """
    for item in result:
        try:
            created_at = item['created_at']
            camera_name = item['camera_name']
            is_deleted = item['deleted']
            address = item['address']
        except KeyError:
            # Clip metadata is incomplete; nothing useful can be downloaded.
            _LOGGER.info("Missing clip information, skipping...")
            continue
        # Skip clips for cameras the caller did not request ('all' keeps everything).
        if camera_name not in camera and 'all' not in camera:
            _LOGGER.debug("Skipping videos for %s.", camera_name)
            continue
        if is_deleted:
            _LOGGER.debug("%s: %s is marked as deleted.", camera_name, address)
            continue
        clip_address = "{}{}".format(self.urls.base_url, address)
        # NOTE(review): created_at is embedded verbatim in the filename;
        # confirm it never contains path-hostile characters (e.g. ':').
        filename = "{}_{}.mp4".format(camera_name, created_at)
        filename = os.path.join(path, filename)
        if os.path.isfile(filename):
            _LOGGER.info("%s already exists, skipping...", filename)
            continue
        # Stream the clip to disk to avoid holding whole videos in memory.
        response = api.http_get(self, url=clip_address, stream=True, json=False)
        with open(filename, 'wb') as vidfile:
            copyfileobj(response.raw, vidfile)
        _LOGGER.info("Downloaded video to %s", filename)
def conjugate_gradient(op, x, rhs, niter, callback=None):
    """Optimized implementation of CG for self-adjoint operators.

    This method solves the inverse problem (of the first kind)::

        A(x) = y

    for a linear and self-adjoint `Operator` ``A``.

    It uses a minimum amount of memory copies by applying re-usable
    temporaries and in-place evaluation.

    The method is described (for linear systems) in a
    `Wikipedia article
    <https://en.wikipedia.org/wiki/Conjugate_gradient_method>`_.

    Parameters
    ----------
    op : linear `Operator`
        Operator in the inverse problem. It must be linear and
        self-adjoint. This implies in particular that its domain and
        range are equal.
    x : ``op.domain`` element
        Element to which the result is written. Its initial value is
        used as starting point of the iteration, and its values are
        updated in each iteration step.
    rhs : ``op.range`` element
        Right-hand side of the equation defining the inverse problem.
    niter : int
        Number of iterations.
    callback : callable, optional
        Object executing code per iteration, e.g. plotting each iterate.

    See Also
    --------
    conjugate_gradient_normal : Solver for nonsymmetric matrices
    """
    # TODO: add a book reference
    # TODO: update doc
    if op.domain != op.range:
        raise ValueError('operator needs to be self-adjoint')
    if x not in op.domain:
        raise TypeError('`x` {!r} is not in the domain of `op` {!r}'
                        ''.format(x, op.domain))
    r = op(x)
    r.lincomb(1, rhs, -1, r)  # r = rhs - A x
    p = r.copy()
    d = op.domain.element()  # Extra storage for storing A x
    sqnorm_r_old = r.norm() ** 2  # Only recalculate norm after update
    if sqnorm_r_old == 0:  # Return if no step forward
        return
    for _ in range(niter):
        op(p, out=d)  # d = A p
        inner_p_d = p.inner(d)
        if inner_p_d == 0.0:  # Return if step is 0
            return
        # Standard CG coefficients; every vector update below is in place.
        alpha = sqnorm_r_old / inner_p_d
        x.lincomb(1, x, alpha, p)  # x = x + alpha * p
        r.lincomb(1, r, -alpha, d)  # r = r - alpha * d
        sqnorm_r_new = r.norm() ** 2
        beta = sqnorm_r_new / sqnorm_r_old
        sqnorm_r_old = sqnorm_r_new
        p.lincomb(1, r, beta, p)  # p = r + beta * p
        if callback is not None:
            # Invoked once per iteration, as documented above.
            callback(x)
def sasdata2dataframe(self, table: str, libref: str = '', dsopts: dict = None, method: str = 'MEMORY', **kwargs) -> 'pd.DataFrame':
    """Export the SAS Data Set to a Pandas Data Frame, returning the Data Frame object.

    :param table: the name of the SAS Data Set you want to export to a Pandas Data Frame
    :param libref: the libref for the SAS Data Set.
    :param dsopts: a dictionary containing any of the following SAS data set options
        (where, drop, keep, obs, firstobs, format):

        - where is a string
        - keep are strings or list of strings.
        - drop are strings or list of strings.
        - obs is a number - either string or int
        - firstobs is a number - either string or int
        - format is a string or dictionary {var: format}

        .. code-block:: python

            {'where'    : 'msrp < 20000 and make = "Ford"',
             'keep'     : 'msrp enginesize Cylinders Horsepower Weight',
             'drop'     : ['msrp', 'enginesize', 'Cylinders', 'Horsepower', 'Weight'],
             'obs'      : 10,
             'firstobs' : '12',
             'format'   : {'money': 'dollar10', 'time': 'tod5.'}}

    :param method: defaults to MEMORY; the original method. CSV is the other choice
        which uses an intermediary csv file; faster for large data
    :param kwargs: dictionary
    :return: Pandas data frame, or None when the data set is missing or code
        submission is disabled
    """
    # Normalize the mutable default.
    if dsopts is None:
        dsopts = {}
    # Guard clause: the data set must exist on the SAS side.
    if self.exist(table, libref) == 0:
        print('The SAS Data Set ' + libref + '.' + table + ' does not exist')
        return None
    # Guard clause: no-submit mode cannot produce a frame.
    if self.nosub:
        print("too complicated to show the code, read the source :), sorry.")
        return None
    return self._io.sasdata2dataframe(table, libref, dsopts, method=method, **kwargs)
def dimension_values(self, dimension, expanded=True, flat=True):
    """Return the values along the requested dimension.

    Args:
        dimension: The dimension to return values for
        expanded (bool, optional): Whether to expand values
            Whether to return the expanded values, behavior depends
            on the type of data:

            * Columnar: If false returns unique values
            * Geometry: If false returns scalar values per geometry
            * Gridded: If false returns 1D coordinates
        flat (bool, optional): Whether to flatten array

    Returns:
        NumPy array of values along the requested dimension
    """
    # Resolve the dimension object strictly, then delegate to the interface.
    resolved = self.get_dimension(dimension, strict=True)
    return self.interface.values(self, resolved, expanded, flat)
def format_role(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Include Python object value, rendering it to text using str.

    Returns 2 part tuple containing list of nodes to insert into the
    document and a list of system messages. Both are allowed to be
    empty.

    :param name: The role name used in the document.
    :param rawtext: The entire markup snippet, with role.
    :param text: The text marked with the role.
    :param lineno: The line number where rawtext appears in the input.
    :param inliner: The inliner instance that called us.
    :param options: Directive options for customization.
    :param content: The directive content for customization.
    """
    if options is None:
        options = {}
    if content is None:
        content = []
    # Role text is "<dotted.name>, <format-spec>"; split at the first comma.
    name, _, format_spec = tuple(field.strip() for field in text.partition(","))
    try:
        prefixed_name, obj, parent, modname = import_by_name(name)
    except ImportError:
        # Report the failure inline and as a system message.
        msg = inliner.reporter.error(
            "Could not locate Python object {}".format(text), line=lineno)
        prb = inliner.problematic(rawtext, rawtext, msg)
        return [prb], [msg]
    app = inliner.document.settings.env.app
    try:
        formatted_value = format(obj, format_spec)
    except ValueError as value_error:
        # The object rejected the supplied format spec.
        msg = inliner.reporter.error(
            "Format error in {}: {}".format(text, value_error), line=lineno)
        prb = inliner.problematic(rawtext, rawtext, msg)
        return [prb], [msg]
    node = nodes.Text(formatted_value, rawsource=rawtext)
    return [node], []
def check_payment_v2(state_engine, state_op_type, nameop, fee_block_id, token_address, burn_address, name_fee, block_id):
    """Verify that for a version-2 namespace (burn-to-creator), the nameop paid the right amount of BTC or Stacks.

    It can pay either through a preorder (for registers), or directly (for renewals).

    Return {'status': True, 'tokens_paid': ..., 'token_units': ...} if so
    Return {'status': False} if not.
    """
    # priced in BTC only if the namespace creator can receive name fees.
    # once the namespace switches over to burning, then the name creator can pay in Stacks as well.
    assert name_fee is not None
    assert isinstance(name_fee, (int, long))
    epoch_features = get_epoch_features(block_id)
    name = nameop['name']
    namespace_id = get_namespace_from_name(name)
    name_without_namespace = get_name_from_fq_name(name)
    namespace = state_engine.get_namespace(namespace_id)
    assert namespace['version'] == NAMESPACE_VERSION_PAY_TO_CREATOR
    # need to be in the right epoch -- i.e. pay-to-creator needs to be a feature
    if EPOCH_FEATURE_NAMESPACE_BURN_TO_CREATOR not in epoch_features:
        log.warning("Name '{}' was created in namespace '{}', with cversion bits 0x{:x}, which is not supported in this epoch".format(name, namespace['namespace_id'], namespace['version']))
        return {'status': False}
    # check burn address
    receive_fees_period = get_epoch_namespace_receive_fees_period(block_id, namespace['namespace_id'])
    expected_burn_address = None
    tokens_allowed = None
    # can only burn to namespace if the namespace is young enough (starts counting from NAMESPACE_REVEAL)
    # can only pay in tokens if the register takes place after the pay-to-creator period (receive_fees_period) expires
    if namespace['reveal_block'] + receive_fees_period >= block_id:
        log.debug("Register must pay to v2 namespace address {}".format(namespace['address']))
        expected_burn_address = namespace['address']
        tokens_allowed = False
    else:
        log.debug("Register must pay to burn address {}".format(BLOCKSTACK_BURN_ADDRESS))
        expected_burn_address = BLOCKSTACK_BURN_ADDRESS
        tokens_allowed = True
    if burn_address != expected_burn_address:
        log.warning("Buyer of {} used the wrong burn address ({}): expected {}".format(name, burn_address, expected_burn_address))
        return {'status': False}
    # allowed to pay in Stacks?
    if EPOCH_FEATURE_NAMEOPS_COST_TOKENS in epoch_features:
        # did we pay any stacks?
        res = get_stacks_payment(state_engine, nameop, state_op_type)
        if res['status']:
            # paid something in Stacks. Will ignore BTC.
            if not tokens_allowed:
                log.warning('Buyer of {} paid in Stacks, but should have paid in BTC to the namespace creator'.format(name))
                return {'status': False}
            # Verify the Stacks payment amount is sufficient.
            res = check_payment_in_stacks(state_engine, nameop, state_op_type, fee_block_id)
            if not res['status']:
                log.warning("Buyer of {} paid in Stacks, but did not pay enough".format(name))
                return {'status': False}
            tokens_paid = res['tokens_paid']
            token_units = res['token_units']
            return {'status': True, 'tokens_paid': tokens_paid, 'token_units': token_units}
    # did not pay in stacks tokens, or this isn't allowed yet
    btc_price = price_name(name_without_namespace, namespace, fee_block_id)
    # price reflects namespace version
    # fee must be high enough (either the preorder paid the right fee at the preorder block height,
    # or the renewal paid the right fee at the renewal height)
    if name_fee < btc_price:
        log.warning("Name '%s' costs %s satoshis, but paid %s satoshis" % (name, btc_price, name_fee))
        return {'status': False}
    log.debug('Paid {} satoshis for {} to {}'.format(name_fee, name, burn_address))
    return {'status': True, 'tokens_paid': name_fee, 'token_units': 'BTC'}
def fix_related_item_tag(dom):
    """Remove <mods:relatedItem> tag in case that there is only <mods:location>
    subtag.
    """
    # Locate <mods:mods>/<mods:relatedItem>/<mods:location>; nothing to do if absent.
    location = dom.match("mods:mods", "mods:relatedItem", "mods:location")
    if not location:
        return
    location = first(location)
    # Drop the <mods:location> subtag by replacing it with an empty element.
    location.replaceWith(dhtmlparser.HTMLElement())
    # remove whole <mods:relatedItem> tag, if there is nothing else left in it
    related_item = dom.match("mods:mods", "mods:relatedItem")
    related_item = first(related_item)
    if not related_item.getContent().strip():
        related_item.replaceWith(dhtmlparser.HTMLElement())
def count_function(func):
    """Decorator for functions that return a collection (technically a dict of collections)
    that should be counted up.

    Also automatically falls back to the Cohort-default filter_fn and
    normalized_per_mb if not specified.
    """
    # Fall back to Cohort-level defaults.
    @use_defaults
    @wraps(func)
    def wrapper(row, cohort, filter_fn=None, normalized_per_mb=None, **kwargs):
        # The wrapped function returns {patient_id: collection}.
        per_patient_data = func(row=row, cohort=cohort, filter_fn=filter_fn,
                                normalized_per_mb=normalized_per_mb, **kwargs)
        patient_id = row["patient_id"]
        if patient_id in per_patient_data:
            count = len(per_patient_data[patient_id])
            if normalized_per_mb:
                # Normalize the raw count by the patient's megabases covered.
                count /= float(get_patient_to_mb(cohort)[patient_id])
            return count
        # Patient absent from the result dict -> undefined count.
        return np.nan
    return wrapper
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.