signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def read_legacy(filename):
    """Read a file with VTK's legacy reader and return it wrapped by vtki."""
    reader = vtk.vtkDataSetReader()
    reader.SetFileName(filename)
    # Poorly formatted legacy files may omit header information; turn on
    # every "read all" switch so no array is silently dropped.
    for enable in (reader.ReadAllScalarsOn,
                   reader.ReadAllColorScalarsOn,
                   reader.ReadAllNormalsOn,
                   reader.ReadAllTCoordsOn,
                   reader.ReadAllVectorsOn):
        enable()
    # Perform the read
    reader.Update()
    output = reader.GetOutputDataObject(0)
    if output is None:
        raise AssertionError('No output when using VTKs legacy reader')
    return vtki.wrap(output)
def check_redis_connected(app_configs, **kwargs):
    """Django system check: obtain the default ``django_redis`` connection
    and verify that Redis answers a ``PING`` command."""
    import redis
    from django_redis import get_redis_connection

    errors = []
    connection = None
    try:
        connection = get_redis_connection('default')
    except redis.ConnectionError as e:
        errors.append(checks.Error(
            'Could not connect to redis: {!s}'.format(e),
            id=health.ERROR_CANNOT_CONNECT_REDIS))
    except NotImplementedError as e:
        errors.append(checks.Error(
            'Redis client not available: {!s}'.format(e),
            id=health.ERROR_MISSING_REDIS_CLIENT))
    except ImproperlyConfigured as e:
        errors.append(checks.Error(
            'Redis misconfigured: "{!s}"'.format(e),
            id=health.ERROR_MISCONFIGURED_REDIS))
    else:
        # Connection succeeded; a falsy PING reply is still a failure.
        if not connection.ping():
            errors.append(checks.Error(
                'Redis ping failed',
                id=health.ERROR_REDIS_PING_FAILED))
    return errors
def clear(self):
    """Reset the cache to its pristine, not-yet-generated state."""
    for mapping in (self.name, self.path):
        mapping.clear()
    self.generated = False
def read(filename='cache'):
    """Load the named cache file.

    :param filename: cache file name (resolved via ``get_cache_path``)
    :return: parsed JSON content, or ``None`` when the file is missing/empty
    """
    cache_path = get_cache_path(filename)
    # A missing or zero-byte cache is treated the same way: nothing cached.
    if not os.path.exists(cache_path) or os.path.getsize(cache_path) == 0:
        return None
    with open(cache_path, 'r') as fh:
        return json.load(fh)
def del_hyperedge(self, hyperedge):
    """Delete the given hyperedge.

    @type  hyperedge: hyperedge
    @param hyperedge: Hyperedge identifier.
    """
    # Unknown hyperedges are silently ignored.
    if hyperedge not in self.hyperedges():
        return
    # Unlink every node attached to this hyperedge, then drop the edge
    # itself, its labeling, and its node in the underlying graph.
    for node in self.edge_links[hyperedge]:
        self.node_links[node].remove(hyperedge)
    del self.edge_links[hyperedge]
    self.del_edge_labeling(hyperedge)
    self.graph.del_node((hyperedge, 'h'))
def export(self, nidm_version, export_dir):
    """Create prov entities and activities."""
    # Attach type, coordinate space and label attributes to the
    # "Cluster Labels Map" entity.
    attributes = (
        (PROV['type'], self.type),
        (NIDM_IN_COORDINATE_SPACE, self.coord_space.id),
        (PROV['label'], self.label),
    )
    self.add_attributes(attributes)
def parse(self, stream, media_type=None, parser_context=None):
    """Parse the incoming bytestream as a multipart encoded form.

    Returns a ``DataAndFiles`` object whose ``.data`` is a ``QueryDict``
    of form parameters and whose ``.files`` is a ``QueryDict`` of files.
    """
    parser_context = parser_context or {}
    request = parser_context['request']
    encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
    # Django's multipart parser reads the content type from META, so feed
    # it a copy with the negotiated media type substituted in.
    meta = request.META.copy()
    meta['CONTENT_TYPE'] = media_type
    try:
        django_parser = DjangoMultiPartParser(
            meta, stream, request.upload_handlers, encoding)
        data, files = django_parser.parse()
    except MultiPartParserError as exc:
        raise ParseError('Multipart form parse error - %s' % six.text_type(exc))
    return DataAndFiles(data, files)
def update_telemetry_configurations(self, configuration, timeout=-1):
    """Update the telemetry configuration of a logical interconnect.

    Changes are asynchronously applied to all managed interconnects.

    Args:
        configuration: The telemetry configuration for the logical
            interconnect.
        timeout: Timeout in seconds. Waits for task completion by default.
            The timeout does not abort the operation in OneView, it only
            stops waiting for its completion.

    Returns:
        dict: The Logical Interconnect.
    """
    uri = self._get_telemetry_configuration_uri()
    defaults = self._get_default_values(
        self.SETTINGS_TELEMETRY_CONFIG_DEFAULT_VALUES)
    merged = self._helper.update_resource_fields(configuration, defaults)
    return self._helper.update(merged, uri=uri, timeout=timeout)
def force_unicode(s, encoding='utf-8', errors='strict'):
    """Similar to smart_text, except that lazy instances are resolved to
    strings rather than kept as lazy objects."""
    # Fast path: already text.  This saves 30-40% when s is an instance of
    # six.text_type, which is the common case for callers of this function.
    if isinstance(s, six.text_type):
        return s
    if isinstance(s, six.string_types):
        # Use .decode() instead of six.text_type(s, encoding, errors) so
        # that a SafeBytes input comes out as SafeText.
        return s.decode(encoding, errors)
    if six.PY3:
        if isinstance(s, bytes):
            return six.text_type(s, encoding, errors)
        return six.text_type(s)
    return six.text_type(bytes(s), encoding, errors)
def check_shape(meth):
    """Decorator for larray magic methods: reject array-like operands whose
    shape differs from this array's shape."""
    @wraps(meth)
    def wrapper(self, operand):
        shaped = isinstance(operand, (larray, numpy.ndarray))
        if shaped and operand.shape != self._shape:
            raise ValueError(
                "shape mismatch: objects cannot be broadcast to a single shape")
        return meth(self, operand)
    return wrapper
def detach(self):
    """Detach the source from its customer.

    Returns True when the source was detached by this call, False when it
    had already been detached (the local copy is resynced either way).
    """
    # First, wipe default source on all customers that use this.
    Customer.objects.filter(default_source=self.id).update(default_source=None)
    try:
        # TODO: we could use the return value of sync_from_stripe_data, or
        # call its internals (self._sync / _attach_objects_hook etc.) to
        # update `self` at this point?
        self.sync_from_stripe_data(self.api_retrieve().detach())
        return True
    except (InvalidRequestError, NotImplementedError):
        # The source was already detached; resync instead.
        # NotImplementedError is an artifact of stripe-python < 2.0
        # https://github.com/stripe/stripe-python/issues/376
        self.sync_from_stripe_data(self.api_retrieve())
        return False
def __hosting_wechat_img(self, content_info, hosting_callback):
    """Upload the images of a WeChat article to hosted storage and rewrite
    the article HTML to point at the hosted copies.

    Parameters
        content_info: dict of parsed WeChat article details
            'content_img_list': [],  # original image URLs from the article
            'content_html': '',      # article body HTML
        hosting_callback: callable
            Given one image URL, returns the hosted replacement URL.

    Returns
        dict
            'content_img_list': list of hosted image URLs
            'content_html': HTML with image links rewritten to hosted URLs
    """
    assert callable(hosting_callback)
    img_urls = content_info.pop("content_img_list")
    html = content_info.pop("content_html")
    for idx, original_url in enumerate(img_urls):
        hosted_url = hosting_callback(original_url)
        if not hosted_url:
            # TODO: define a dedicated exception type for hosting failures
            raise Exception()
        img_urls[idx] = hosted_url
        html = html.replace(original_url, hosted_url)
    return dict(content_img_list=img_urls, content_html=html)
def a_stays_connected(ctx):
    """Stay connected."""
    # The controller keeps its link while the device side is marked down.
    ctx.ctrl.connected, ctx.device.connected = True, False
    return True
def strip_line_magic_v3(cls, line):
    """strip_line_magic() implementation for Python 3."""
    stripped, magic_kind = line, ""
    hits = re.findall(r"run_line_magic\(([^]]+)", line)
    if hits and hits[0]:
        # The line contains a run_line_magic(...) call.
        call_args = hits[0]
        if call_args.endswith(')'):
            call_args = call_args[:-1]
        # eval is simpler than teasing the tuple apart with another regex.
        magic_kind, stripped = eval(call_args)
    return stripped, magic_kind
def save(self):
    """Persist the current session.

    Overwrites the file the session was saved to earlier; falls back to a
    save-as flow when no path is known yet.
    """
    if not self.path:
        self.saveAs()
    else:
        self._saveState(self.path)
def typical_price(close_data, high_data, low_data):
    """Typical Price.

    Formula:
    TPt = (HIGHt + LOWt + CLOSEt) / 3
    """
    catch_errors.check_for_input_len_diff(close_data, high_data, low_data)
    # Lengths are guaranteed equal by the check above, so zip is lossless.
    return np.array([
        (high + low + close) / 3
        for high, low, close in zip(high_data, low_data, close_data)
    ])
def lookup(self, allowed_types, **kwargs):
    """Look up an object of one of ``allowed_types``.

    ``kwargs`` is passed directly to the catalog query; the first type that
    yields any brains wins.
    """
    at = getToolByName(self, 'archetype_tool')
    for portal_type in allowed_types:
        catalog_name = at.catalog_map.get(portal_type, [None])[0]
        catalog = getToolByName(self, catalog_name)
        kwargs['portal_type'] = portal_type
        brains = catalog(**kwargs)
        if brains:
            return brains
def _count_objs(self, obj, path=None, **kwargs):
    """Recursively walk ``obj`` and accumulate count values.

    Args:
        obj: the object (a tree of dicts/lists/scalars) to parse
        path: the path of the current position in the tree
        kwargs:
            current: a dictionary of counts for the current call
            sub_val: the value to use for subtotal aggregation

    Returns:
        The updated ``kwargs`` dict (with 'current' and possibly 'sub_val').
    """
    # NOTE: the original version contained live pdb.set_trace() breakpoints
    # and a dead duplicate branch in the list case; both removed.
    sub_val = None
    if isinstance(obj, dict):
        for key, value in obj.items():
            if isinstance(value, (list, dict)):
                kwargs = self._count_objs(
                    value, self.make_path(key, path), **kwargs)
            else:
                # Capture the subtotal value when this leaf's path matches.
                if self.make_path(key, path) == self.sub_total:
                    sub_val = value
                kwargs['current'] = self._increment_prop(key, path, **kwargs)
    elif isinstance(obj, list):
        for item in obj:
            if isinstance(item, (list, dict)):
                kwargs = self._count_objs(item, path, **kwargs)
            else:
                if path == self.sub_total:
                    sub_val = item
                kwargs['current'] = self._increment_prop(path, **kwargs)
    # Only the first discovered subtotal value is kept across the recursion.
    if kwargs.get('sub_val') is None:
        kwargs['sub_val'] = sub_val
    return kwargs
def is_ancestor(self, commit1, commit2, patch=False):
    """Return True if commit1 is a direct ancestor of commit2, else False.

    A commit is considered a direct ancestor of itself.
    """
    revset = "first(%s::%s)" % (commit1, commit2)
    # The template emits "exists" only when the revset is non-empty.
    output = self.hg("log", "-r", revset, "--template", "exists", patch=patch)
    return "exists" in output
def accounting(dbfile, config):
    '''Send a RADIUS accounting (interim) update for every online client
    whose accounting interval has elapsed.'''
    try:
        nas_id = config.get('DEFAULT', 'nas_id')
        nas_addr = config.get('DEFAULT', 'nas_addr')
        secret = config.get('DEFAULT', 'radius_secret')
        radius_addr = config.get('DEFAULT', 'radius_addr')
        radius_acct_port = config.getint('DEFAULT', 'radius_acct_port')
        radius_timeout = config.getint('DEFAULT', 'radius_timeout')
        status_dbfile = config.get('DEFAULT', 'statusdb')
        clients = statusdb.query_client(status_dbfile)
        ctime = int(time.time())
        for cli in clients:
            # Skip clients whose accounting interval has not elapsed yet.
            if (ctime - int(cli['uptime'])) < int(cli['acct_interval']):
                continue
            session_id = cli['session_id']
            req = {'User-Name': cli['username']}
            req['Acct-Status-Type'] = ACCT_UPDATE
            req['Acct-Session-Id'] = session_id
            req["Acct-Output-Octets"] = int(cli['outbytes'])
            req["Acct-Input-Octets"] = int(cli['inbytes'])
            req['Acct-Session-Time'] = (ctime - int(cli['ctime']))
            req["NAS-IP-Address"] = nas_addr
            req["NAS-Port-Id"] = '0/0/0:0.0'
            req["NAS-Port"] = 0
            req["Service-Type"] = "Login-User"
            req["NAS-Identifier"] = nas_id
            req["Called-Station-Id"] = '00:00:00:00:00:00'
            req["Calling-Station-Id"] = '00:00:00:00:00:00'
            req["Framed-IP-Address"] = cli['userip']

            # Bind session_id as a default argument: the deferred callback
            # fires asynchronously, after the loop variable has moved on to
            # the next client (late-binding closure bug in the original).
            def update_uptime(radresp, session_id=session_id):
                statusdb.update_client_uptime(status_dbfile, session_id)
                log.msg('online<%s> client accounting update' % session_id)

            def onresp(r, update_uptime=update_uptime):
                try:
                    update_uptime(r)
                except Exception as e:
                    log.err('online update uptime error')
                    log.err(e)

            d = client.send_acct(str(secret), get_dictionary(), radius_addr,
                                 acctport=radius_acct_port, debug=True, **req)
            d.addCallbacks(onresp, log.err)
    # "except Exception, e" is Python-2-only syntax; "as" works on 2.6+/3.x.
    except Exception as e:
        log.err('accounting error')
        log.err(e)
def geocode(self, string, bounds=None, region=None, language=None, sensor=False):
    '''Geocode an address.

    Pls refer to the Google Maps Web API for the details of the parameters'''
    if isinstance(string, unicode):
        string = string.encode('utf-8')
    params = {
        'address': self.format_string % string,
        'sensor': str(sensor).lower(),
    }
    # Optional query parameters are only sent when truthy.
    for key, value in (('bounds', bounds),
                       ('region', region),
                       ('language', language)):
        if value:
            params[key] = value
    # Premier accounts must sign their request URLs.
    build_url = self.get_signed_url if self.premier else self.get_url
    return self.GetService_url(build_url(params))
def return_labels_numpy(self, original=False):
    """Return a 2d numpy array of labels, one row per datapoint.

    Parameters
        original: if True, return the original labels; if False, return the
            transformed labels (as defined by label_dict). Default: False.

    Returns
        A numpy array of labels.
    """
    if self._prepopulated is False:
        raise errors.EmptyDatabase(self.dbpath)
    engine = create_engine('sqlite:////' + self.dbpath)
    trainset.Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    # Use the first record to size the label matrix.
    first_record = session.query(trainset.TrainSet).get(1)
    n_cols = len(first_record.labels['original'])
    labels = np.zeros([self.points_amt, n_cols])
    key = 'original' if original else 'transformed'
    ordered = session.query(trainset.TrainSet).order_by(trainset.TrainSet.id)
    for row_idx, record in enumerate(ordered):
        labels[row_idx, :] = record.labels[key]
    session.close()
    return labels
def _string_in_table(self, candidate: str) -> List[str]:
    """Check whether ``candidate`` occurs in the table.

    Returns the names of the columns under which it occurs, or an empty
    list when it does not occur at all.
    """
    # An exact cell match wins outright.
    exact = self._string_column_mapping.get(candidate)
    if exact:
        return exact
    # Otherwise look for candidate as a substring of any cell value.
    found: List[str] = []
    for cell_value, column_names in self._string_column_mapping.items():
        if candidate in cell_value:
            found.extend(column_names)
    return list(set(found))
def request(self, path, action, data=''):
    """Make a request to the API.

    Args:
        path: endpoint path (with or without the base URL prefix).
        action: one of 'get', 'post', 'put', 'delete'.
        data: optional payload; JSON-encoded before sending when truthy.

    Returns:
        The decoded JSON response, or None when the body is empty.

    Raises:
        TPMException: on connection errors, API-reported errors, or
            403/404 responses.
        ValueError: when a non-403/404 response body is not valid JSON.
    """
    # Check if the path includes URL or not.
    head = self.base_url
    if path.startswith(head):
        path = path[len(head):]
    path = quote_plus(path, safe='/')
    if not path.startswith(self.api):
        path = self.api + path
    log.debug('Using path %s' % path)
    # If we have data, convert to JSON
    if data:
        data = json.dumps(data)
        log.debug('Data to sent: %s' % data)
    # Default to unauthenticated.  The original left `auth` unbound when
    # neither key-pair nor username/password were configured, which raised
    # NameError at the requests call below.
    auth = False
    # In case of key authentication
    if self.private_key and self.public_key:
        timestamp = str(int(time.time()))
        log.debug('Using timestamp: {}'.format(timestamp))
        unhashed = path + timestamp + str(data)
        log.debug('Using message: {}'.format(unhashed))
        self.hash = hmac.new(str.encode(self.private_key),
                             msg=unhashed.encode('utf-8'),
                             digestmod=hashlib.sha256).hexdigest()
        log.debug('Authenticating with hash: %s' % self.hash)
        self.headers['X-Public-Key'] = self.public_key
        self.headers['X-Request-Hash'] = self.hash
        self.headers['X-Request-Timestamp'] = timestamp
    # In case of user credentials authentication
    elif self.username and self.password:
        auth = requests.auth.HTTPBasicAuth(self.username, self.password)
    # Set unlock reason
    if self.unlock_reason:
        self.headers['X-Unlock-Reason'] = self.unlock_reason
        log.info('Unlock Reason: %s' % self.unlock_reason)
    url = head + path
    # Try API request and handle Exceptions
    # NOTE(review): verify=False disables TLS certificate validation for
    # every call — confirm this is intentional for this deployment.
    try:
        if action == 'get':
            log.debug('GET request %s' % url)
            self.req = requests.get(url, headers=self.headers, auth=auth,
                                    verify=False)
        elif action == 'post':
            log.debug('POST request %s' % url)
            self.req = requests.post(url, headers=self.headers, auth=auth,
                                     verify=False, data=data)
        elif action == 'put':
            log.debug('PUT request %s' % url)
            self.req = requests.put(url, headers=self.headers, auth=auth,
                                    verify=False, data=data)
        elif action == 'delete':
            log.debug('DELETE request %s' % url)
            self.req = requests.delete(url, headers=self.headers,
                                       verify=False, auth=auth)
        if self.req.content == b'':
            result = None
            log.debug('No result returned.')
        else:
            result = self.req.json()
            if 'error' in result and result['error']:
                raise TPMException(result['message'])
    except requests.exceptions.RequestException as e:
        log.critical("Connection error for " + str(e))
        raise TPMException("Connection error for " + str(e))
    except ValueError as e:
        if self.req.status_code == 403:
            log.warning(url + " forbidden")
            raise TPMException(url + " forbidden")
        elif self.req.status_code == 404:
            # Original logged "forbidden" here; the response is a 404.
            log.warning(url + " not found")
            raise TPMException(url + " not found")
        else:
            message = ('%s: %s %s' % (e, self.req.url, self.req.text))
            log.debug(message)
            raise ValueError(message)
    return result
def geometry_range(crd_range, elev, crd_type):
    """Range of coordinates. (e.g. 2 latitude coordinates, and 0 longitude
    coordinates)

    :param crd_range: Latitude or Longitude values
    :param elev: Elevation value
    :param crd_type: Coordinate type, lat or lon
    :return dict:
    """
    # Unknown coordinate types leave empty pairs, matching the input length.
    coordinates = [[] for _ in crd_range]
    if crd_type in ("lat", "lon"):
        for idx, value in enumerate(crd_range):
            # Latitude fills the first slot, longitude the second.
            pair = [value, "nan"] if crd_type == "lat" else ["nan", value]
            if elev:
                pair.append(elev)
            coordinates[idx] = pair
    d = OrderedDict()
    d["type"] = "Range"
    d["coordinates"] = coordinates
    return d
def ensure_yx_order(func):
    """Wrap a function to ensure all array arguments are y, x ordered, based
    on the ``dim_order`` keyword argument."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Check what order we're given
        dim_order = kwargs.pop('dim_order', None)
        x_first = _is_x_first_dim(dim_order)
        # If x is the first dimension, flip (transpose) every array within
        # the function args.
        if x_first:
            args = tuple(_check_and_flip(arr) for arr in args)
            # Original iterated `kwargs` directly, which yields only keys
            # and fails to unpack; .items() is required here.
            for k, v in kwargs.items():
                kwargs[k] = _check_and_flip(v)
        ret = func(*args, **kwargs)
        # If we flipped on the way in, need to flip on the way out so that
        # output array(s) match the dimension order of the original input.
        if x_first:
            return _check_and_flip(ret)
        else:
            return ret
    # Inject a docstring for the dim_order argument into the function's
    # docstring.
    dim_order_doc = """
    dim_order : str or ``None``, optional
        The ordering of dimensions in passed in arrays. Can be one of ``None``, ``'xy'``,
        or ``'yx'``. ``'xy'`` indicates that the dimension corresponding to x is the leading
        dimension, followed by y. ``'yx'`` indicates that x is the last dimension, preceded
        by y. ``None`` indicates that the default ordering should be assumed,
        which is 'yx'. Can only be passed as a keyword argument, i.e.
        func(..., dim_order='xy')."""
    # Find the first blank line after the start of the parameters section
    params = wrapper.__doc__.find('Parameters')
    blank = wrapper.__doc__.find('\n\n', params)
    wrapper.__doc__ = wrapper.__doc__[:blank] + dim_order_doc + wrapper.__doc__[blank:]
    return wrapper
def asML(self):
    """Convert this vector to the new mllib-local representation.

    This does NOT copy the data; it copies references.

    :return: :py:class:`pyspark.ml.linalg.SparseVector`

    .. versionadded:: 2.0.0
    """
    # Size, index array and value array are shared, not copied.
    converted = newlinalg.SparseVector(self.size, self.indices, self.values)
    return converted
def logsumexp(x):
    """Numerically stable log(sum(exp(x))); also defined in scipy.misc."""
    # Shift by the maximum so the exponentials never overflow.
    shift = np.max(x)
    return shift + np.log(np.exp(x - shift).sum())
def teardown ( self , np = np ) :
"""Lives in zipline . _ _ init _ _ for doctests .""" | if self . old_err is not None :
np . seterr ( ** self . old_err )
if self . old_opts is not None :
np . set_printoptions ( ** self . old_opts ) |
def img2img_transformer2d_tiny():
    """Tiny params."""
    hparams = img2img_transformer2d_base()
    # Shrink the base configuration down to a tiny model.
    hparams.num_decoder_layers = 2
    hparams.num_heads = 4
    hparams.hidden_size = 128
    hparams.filter_size = 128
    hparams.batch_size = 4
    hparams.max_length = 128
    hparams.img_len = 32
    hparams.pos = "timing"
    hparams.attention_key_channels = hparams.attention_value_channels = 0
    return hparams
def analyse_action(func):
    """Analyse a function and derive its CLI argument definitions.

    Returns:
        (func, description, arguments) where ``arguments`` is a list of
        (name, shortcut, default, argument_type) tuples.

    Raises:
        TypeError: for *args/**kwargs parameters, parameters without a
            default definition, or names starting with an underscore.
    """
    description = inspect.getdoc(func) or 'undocumented action'
    arguments = []
    # inspect.getargspec() was removed in Python 3.11; getfullargspec() is
    # the drop-in replacement for the fields used here.
    spec = inspect.getfullargspec(func)
    if spec.varargs or spec.varkw:
        raise TypeError('variable length arguments for action not allowed.')
    defaults = spec.defaults or ()
    if len(spec.args) != len(defaults):
        raise TypeError('not all arguments have proper definitions')
    for arg, definition in zip(spec.args, defaults):
        if arg.startswith('_'):
            raise TypeError('arguments may not start with an underscore')
        # A definition is either a bare default or a (shortcut, default) pair.
        if isinstance(definition, tuple):
            shortcut, default = definition
        else:
            shortcut, default = None, definition
        argument_type = argument_types[type(default)]
        if isinstance(default, bool) and default is True:
            # Boolean flags that default to True are exposed as --no-<name>.
            arg = 'no-' + arg
        arguments.append((arg.replace('_', '-'), shortcut,
                          default, argument_type))
    return func, description, arguments
def add_message(request, level, message, extra_tags='', fail_silently=False,
                *args, **kwargs):
    """Attempts to add a message to the request using the 'messages' app."""
    if hasattr(request, '_messages'):
        return request._messages.add(level, message, extra_tags,
                                     *args, **kwargs)
    # The middleware is not installed; complain unless asked not to.
    if not fail_silently:
        raise MessageFailure(
            'You cannot add messages without installing '
            'django.contrib.messages.middleware.MessageMiddleware')
def create_qrcode(self, qrcode_data):
    """Create a card coupon QR code.

    :param qrcode_data: QR code description payload
    :return: QR code ticket; exchange it for the image file via
        :func:`show_qrcode`
    """
    return self._post(
        'card/qrcode/create',
        data=qrcode_data,
        result_processor=lambda res: res['ticket'],
    )
def to_pypsa(network, mode, timesteps):
    """Translate the graph based grid representation to a PyPSA Network.

    Topology and time series are translated per grid level:

    * ``mode='mv'``: export the MV grid only, with LV load/generation
      aggregated at the LV stations (not fully handled downstream yet).
    * ``mode='lv'``: export the LV grid only (not yet implemented).
    * ``mode=None`` (default): translate MV and LV separately via
      :func:`mv_to_pypsa` / :func:`lv_to_pypsa`, merge them with
      :func:`combine_mv_and_lv`, and fetch time series for both at once.

    Integrity checks cover islanded sub-networks, completeness of time
    series, bus availability for every component, and duplicate labels.

    Parameters
        network : Network
            eDisGo grid container.
        mode : str or None
            Grid level(s) to translate (see above).
        timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or \
            :pandas:`pandas.Timestamp<timestamp>`
            Time steps to export and use in the power flow analysis.

    Returns
        PyPSA Network
    """
    # Check if timesteps is array-like, otherwise convert to list (necessary
    # to obtain a dataframe when using .loc in time series functions).
    if not hasattr(timesteps, "__len__"):
        timesteps = [timesteps]
    # get topology and time series data
    if mode is None:
        mv_components = mv_to_pypsa(network)
        lv_components = lv_to_pypsa(network)
        components = combine_mv_and_lv(mv_components, lv_components)
        if list(components['Load'].index.values):
            timeseries_load_p_set = _pypsa_load_timeseries(
                network, mode=mode, timesteps=timesteps)
        if len(list(components['Generator'].index.values)) > 1:
            timeseries_gen_p_min, timeseries_gen_p_max = \
                _pypsa_generator_timeseries(
                    network, mode=mode, timesteps=timesteps)
            timeseries_storage_p_min, timeseries_storage_p_max = \
                _pypsa_storage_timeseries(
                    network, mode=mode, timesteps=timesteps)
        if list(components['Bus'].index.values):
            timeseries_bus_v_set = _pypsa_bus_timeseries(
                network, components['Bus'].index.tolist(),
                timesteps=timesteps)
    else:
        raise ValueError("Provide proper mode or leave it empty to export "
                         "entire grid topology.")
    # check topology
    _check_topology(components)
    # create power flow problem
    pypsa_network = PyPSANetwork()
    pypsa_network.edisgo_mode = mode
    pypsa_network.set_snapshots(timesteps)
    # import grid topology to PyPSA network
    # buses are created first to avoid warnings
    pypsa_network.import_components_from_dataframe(components['Bus'], 'Bus')
    for k, comps in components.items():
        # `is not 'Bus'` compared identity of str objects (implementation
        # dependent, SyntaxWarning on Python >= 3.8); use equality instead.
        if k != 'Bus' and not comps.empty:
            pypsa_network.import_components_from_dataframe(comps, k)
    # import time series to PyPSA network
    if len(list(components['Generator'].index.values)) > 1:
        import_series_from_dataframe(
            pypsa_network, timeseries_gen_p_min, 'Generator', 'p_min_pu')
        import_series_from_dataframe(
            pypsa_network, timeseries_gen_p_max, 'Generator', 'p_max_pu')
        import_series_from_dataframe(
            pypsa_network, timeseries_storage_p_min, 'Generator', 'p_min_pu')
        import_series_from_dataframe(
            pypsa_network, timeseries_storage_p_max, 'Generator', 'p_max_pu')
    if list(components['Load'].index.values):
        import_series_from_dataframe(
            pypsa_network, timeseries_load_p_set, 'Load', 'p_set')
    if list(components['Bus'].index.values):
        import_series_from_dataframe(
            pypsa_network, timeseries_bus_v_set, 'Bus', 'v_mag_pu_set')
    _check_integrity_of_pypsa(pypsa_network)
    return pypsa_network
def create_auth_token(sender, instance, raw, created, **kwargs):
    """Create token when a user is created (from rest_framework)."""
    # Skip fixture loading (raw) and updates to already-existing users.
    if raw or not created:
        return
    sender.objects.create(user=instance)
def _getNodeFnib(self, name, valu):
    '''Return a (form, norm, info, buid) tuple.'''
    form = self.model.form(name)
    if form is None:
        raise s_exc.NoSuchForm(name=name)
    try:
        norm, info = form.type.norm(valu)
    except Exception as e:
        # Surface normalization failures with full context for the caller.
        raise s_exc.BadPropValu(prop=form.name, valu=valu, mesg=str(e))
    return form, norm, info, s_common.buid((form.name, norm))
def reminder_pdf ( self , reminder_id ) :
"""Opens a pdf of a reminder
: param reminder _ id : the reminder id
: return : dict""" | return self . _create_get_request ( resource = REMINDERS , billomat_id = reminder_id , command = PDF ) |
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    # pylint: disable=too-many-arguments
    """See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for specification of input and result values.

    Implements the following equations:

    Equation (8) on p. 203 for the bedrock ground motion:
    ``ln(y_br) = c1 + c2*(M - 6) + c3*(M - 6)**2 - lnR - c4*R + ln(ε_br)``

    Equation (9) on p. 207 gives the site amplification factor:
    ``ln(F_s) = a1*y_br + a2 + ln(δ_site)``

    Equation (10) on p. 207 for the ground motion at a given site:
    ``y_site = y_br*F_s``

    Equation (11) on p. 207 for total standard error at a given site:
    ``σ{ln(ε_site)} = sqrt(σ{ln(ε_br)}**2 + σ{ln(δ_site)}**2)``
    """
    # Bedrock coefficients for this intensity measure type, merged with the
    # site-class specific ones.
    coeffs = self.COEFFS_BEDROCK[imt].copy()
    a_1, a_2, sigma_site = self._get_site_coeffs(sites, imt)
    coeffs.update({'a1': a_1, 'a2': a_2, 'sigma_site': sigma_site})
    # Bedrock motion, equation (8).
    ln_mean = (self._compute_magnitude_terms(rup, coeffs)
               + self._compute_distance_terms(dists, coeffs))
    # Site-class adjustment, equation (10).
    ln_mean += self._compute_site_amplification(ln_mean, coeffs)
    # No need to convert to g since "In [equation (8)], y_br = (SA/g)".
    ln_stddevs = self._get_stddevs(coeffs, stddev_types)
    return ln_mean, [ln_stddevs]
def AddPoly(self, poly, smart_duplicate_handling=True):
    """Adds a new polyline to the collection."""
    name = poly.GetName()
    inserted_name = name
    if name in self._name_to_shape:
        if not smart_duplicate_handling:
            raise ShapeError("Duplicate shape found: " + name)
        print("Warning: duplicate shape id being added to collection: " + name)
        if poly.GreedyPolyMatchDist(self._name_to_shape[name]) < 10:
            # NOTE(review): the "skip" branch still overwrites the stored
            # shape below — confirm that replacing the original is intended.
            print(" (Skipping as it apears to be an exact duplicate)")
        else:
            print(" (Adding new shape variant with uniquified name)")
            inserted_name = "%s-%d" % (inserted_name, len(self._name_to_shape))
    self._name_to_shape[inserted_name] = poly
def result(self, *args, **kwargs):
    """Build the UPDATE SQL statement.

    Keyword Arguments:
        pretty (bool): When True, separate clauses with newlines instead of
            spaces. Defaults to False.

    Returns:
        str: The assembled SQL, always ending with one trailing separator.
    """
    prettify = kwargs.get('pretty', False)
    # The original appended the same if/else separator after every clause;
    # compute it once instead (DRY).
    sep = '\n' if prettify else ' '
    sql = 'UPDATE %s' % self._class + sep
    if self.data:
        sql += 'MERGE ' + json.dumps(self.data) + sep
    if self.where_criteria.size() > 0:
        sql += 'WHERE ' + self.where_criteria.result() + sep
    return sql
def register(self, name, namespace):
    """Register a new namespace with the Configuration object.

    Args:
        name (str): The name of the section/namespace.
        namespace (namespace.Namespace): The Namespace object to store.

    Raises:
        TypeError: If the namespace is not a Namespace object.
        ValueError: If the namespace is already registered.
    """
    # Validate the argument type first: an ill-typed object must always
    # raise TypeError, even when its name collides with an existing entry.
    if not isinstance(namespace, ns.Namespace):
        raise TypeError("Namespaces must be of type Namespace.")
    if name in self._NAMESPACES:
        raise ValueError("Namespace {0} already exists.".format(name))
    self._NAMESPACES[name] = namespace
def yaml_processor(entity):
    '''Unserialize raw POST data in YAML format to a Python data structure.

    Stores the parsed structure on ``cherrypy.serving.request.unserialized_data``
    and the raw (unicode) body on ``cherrypy.serving.request.raw_body``.

    :param entity: raw POST data (CherryPy request entity)
    :raises cherrypy.HTTPError: 400 when the body is not valid YAML
    '''
    if six.PY2:
        body = entity.fp.read()
    else:
        # On Py3 the entity must be drained into a buffer first, then
        # decoded to text. See https://github.com/cherrypy/cherrypy/pull/1572
        contents = BytesIO()
        body = entity.fp.read(fp_out=contents)
        contents.seek(0)
        body = salt.utils.stringutils.to_unicode(contents.read())
    try:
        cherrypy.serving.request.unserialized_data = salt.utils.yaml.safe_load(body)
    # NOTE(review): only ValueError is caught here; salt.utils.yaml.safe_load
    # may raise YAML-specific errors as well — confirm the wrapper converts
    # them to ValueError.
    except ValueError:
        raise cherrypy.HTTPError(400, 'Invalid YAML document')
    cherrypy.serving.request.raw_body = body
def space_row(left, right, filler=' ', total_width=-1):
    """Join two strings with padding between them.

    Arguments:
        left: left-aligned text (converted with str()).
        right: right-aligned text (converted with str()).
        filler: padding character; only its first character is used.
        total_width: target line width. A negative value means
            "use exactly that many filler characters" instead.

    Returns:
        str: the combined row.
    """
    left, right = str(left), str(right)
    pad_char = str(filler)[:1]
    if total_width < 0:
        gap = -total_width
    else:
        gap = total_width - len(left) - len(right)
    # A negative gap multiplies to the empty string, so overlong content
    # is simply concatenated.
    return "".join((left, pad_char * gap, right))
def _draw_tile_layer(self, tile, layer_name, c_filters, colour, t_filters, x, y, bg):
    """Draw the visible geometry in the specified map tile.

    :param tile: mapping of layer name -> vector layer data.
    :param layer_name: which layer of the tile to draw.
    :param c_filters: feature "class" values to keep (empty = keep all).
    :param colour: colour passed through to the feature renderer.
    :param t_filters: feature "type" values to keep (empty = keep all).
    :param x, y: tile position relative to the screen centre.
    :param bg: background colour passed through to the renderer.
    :returns: 1 if the layer was processed, 0 if off-screen or absent.
    """
    # Don't bother rendering if the tile is not visible
    left = (x + self._screen.width // 4) * 2
    top = y + self._screen.height // 2
    if (left > self._screen.width or left + self._size * 2 < 0 or top > self._screen.height or top + self._size < 0):
        return 0
    # Not all layers are available in every tile.
    try:
        _layer = tile[layer_name]
        _extent = float(_layer["extent"])
    except KeyError:
        return 0
    for _feature in _layer["features"]:
        try:
            # Apply class/type filters; features missing the looked-up
            # keys are silently skipped via the KeyError handler below.
            if c_filters and _feature["properties"]["class"] not in c_filters:
                continue
            if (t_filters and _feature["type"] not in t_filters and _feature["properties"]["type"] not in t_filters):
                continue
            self._draw_feature(_feature, _extent, colour, bg, (x + self._screen.width // 4) * 2, y + self._screen.height // 2)
        except KeyError:
            pass
    return 1
def _check(self, args):
    """Raise DocoptExit when more than one mutually exclusive option is set."""
    active = [arg for arg in self._mapping if args[arg]]
    if len(active) > 1:
        raise DocoptExit(_('These options are mutually exclusive: {0}', ', '.join(self._mapping)))
def to_json(self, indent=4):
    """Serialize metas and reference attributes to a JSON string.

    Keyword Arguments:
        indent (int): Space indentation, default ``4``.

    Returns:
        str: JSON payload containing ``metas`` plus every rule attribute.
    """
    payload = {'metas': self.metas}
    for attr in self._rule_attrs:
        payload[attr] = getattr(self, attr)
    return json.dumps(payload, indent=indent)
def _waiting_expect(self):
    '''``True`` when the client is waiting for 100 Continue.'''
    # Once we have answered (or decided not to answer) the Expect header,
    # _expect_sent is non-None and we never wait again.
    if self._expect_sent is not None:
        return False
    if self.environ.get('HTTP_EXPECT', '').lower() == '100-continue':
        return True
    self._expect_sent = ''
    return False
def dn(self, fraction, n=None):
    r'''Computes the diameter at which a specified `fraction` of the
    distribution falls under. Utilizes a bounded solver to search for the
    desired diameter.

    Parameters
    ----------
    fraction : float
        Fraction of the distribution which should be under the calculated
        diameter, [-]
    n : int, optional
        None (for the `order` specified when the distribution was created),
        0 (number), 1 (length), 2 (area), 3 (volume/mass),
        or any integer, [-]

    Returns
    -------
    d : float
        Particle size diameter, [m]

    Examples
    --------
    >>> psd = PSDLognormal(s=0.5, d_characteristic=5E-6, order=3)
    >>> psd.dn(.5)
    5e-06
    >>> psd.dn(1)
    0.00029474365335233776
    >>> psd.dn(0)
    0.0
    '''
    if fraction == 1.0:
        # Nudge just below 1 so the solver does not simply return the
        # upper end of the search interval.
        fraction = 1.0 - epsilon
    if fraction < 0:
        # BUG FIX: old message read 'Fraction must be more than 0' even
        # though 0 is a valid input (handled below).
        raise ValueError('Fraction must be non-negative')
    elif fraction == 0:
        if self.truncated:
            return self.d_min
        return 0.0
    elif fraction > 1:
        # BUG FIX: old message 'Fraction less than 1' was garbled.
        raise ValueError('Fraction must be less than or equal to 1')
    # Solve cdf(d) == fraction. The absolute tolerance must be tiny:
    # dn can itself be extremely small, so iteration must continue until
    # the float-precision limit pins down the answer.
    return brenth(lambda d: self.cdf(d, n=n) - fraction, self.d_minimum, self.d_excessive, maxiter=1000, xtol=1E-200)
def add_padding(self, name, left=0, right=0, top=0, bottom=0, value=0, input_name='data', output_name='out', padding_type='constant'):
    """Add a padding layer to the model. Kindly refer to NeuralNetwork.proto for details.

    Parameters
    ----------
    name : str
        The name of this layer.
    left, right, top, bottom : int
        Number of elements to be padded on each side of the input blob.
    value : float
        Value of the padded elements. Used only when padding_type = 'constant'.
    input_name : str
        The input blob name of this layer.
    output_name : str
        The output blob name of this layer.
    padding_type : str
        One of 'constant', 'reflection' or 'replication'.

    Raises
    ------
    ValueError
        If ``padding_type`` is not one of the supported values.

    See Also
    --------
    add_crop, add_convolution, add_pooling
    """
    # Validate up front so a bad padding_type does not leave a half-built
    # layer appended to the spec (the old code added the layer first).
    if padding_type not in ('constant', 'reflection', 'replication'):
        raise ValueError("Unknown padding_type %s" % (padding_type))
    nn_spec = self.nn_spec  # (removed unused local alias of self.spec)
    # Add a new layer
    spec_layer = nn_spec.layers.add()
    spec_layer.name = name
    spec_layer.input.append(input_name)
    spec_layer.output.append(output_name)
    spec_layer_params = spec_layer.padding
    # Set the parameters
    if padding_type == 'constant':
        spec_layer_params.constant.value = value
    elif padding_type == 'reflection':
        spec_layer_params.reflection.MergeFromString(b'')
    elif padding_type == 'replication':
        spec_layer_params.replication.MergeFromString(b'')
    height_border = spec_layer_params.paddingAmounts.borderAmounts.add()
    height_border.startEdgeSize = top
    height_border.endEdgeSize = bottom
    width_border = spec_layer_params.paddingAmounts.borderAmounts.add()
    width_border.startEdgeSize = left
    width_border.endEdgeSize = right
def get_resource(resource_name, key, identifier_fields, profile='pagerduty', subdomain=None, api_key=None):
    '''Get any single pagerduty resource by key.

    We allow flexible lookup by any of a list of identifier_fields, e.g.
    get_resource('users', key, ['name', 'email'], ...). All objects of the
    resource type are fetched once per salt run (cached in __context__) and
    brute-force searched; the cache is purged after any create/update/delete.
    Schedules are re-fetched individually because the list endpoint returns
    less data than /schedules/:id.

    Returns the matching resource dict, or None when nothing matches.
    '''
    # Cache the expensive 'get all resources' calls into __context__ so
    # they happen once per salt run.
    cache = __context__.setdefault('pagerduty_util.resource_cache', {})
    if resource_name not in cache:
        if resource_name == 'services':
            action = resource_name + '?include[]=escalation_policy'
        else:
            action = resource_name
        cache[resource_name] = _query(action=action, profile=profile, subdomain=subdomain, api_key=api_key)[resource_name]
    for resource in cache[resource_name]:
        for field in identifier_fields:
            if resource[field] == key:
                if resource_name == 'schedules':
                    # /schedules returns less data than /schedules/:id,
                    # so fetch the full record now that we found it.
                    return _query(action='{0}/{1}'.format(resource_name, resource['id']), profile=profile, subdomain=subdomain, api_key=api_key)
                return resource
    return None
def simple_preprocess(doc, deacc=False, min_len=2, max_len=15):
    """Convert a document into a list of final unicode tokens.

    Lowercases, tokenizes and optionally de-accents; keeps only tokens of
    length ``min_len``..``max_len`` that do not start with an underscore.
    """
    def _keep(tok):
        return min_len <= len(tok) <= max_len and not tok.startswith('_')
    return [tok for tok in tokenize(doc, lower=True, deacc=deacc, errors='ignore') if _keep(tok)]
def iterencode(self, o, _one_shot=False):
    """Encode the given object and yield each string representation as
    available.

    For example::

        for chunk in JSONEncoder().iterencode(bigobject):
            mysocket.write(chunk)

    Floats are rendered through ``self._formatter`` via the pure-Python
    encoder path.
    """
    # Temporarily disable the C encoder so the pure-Python path (which
    # honors our custom float formatting) is selected.
    c_make_encoder_original = json.encoder.c_make_encoder
    json.encoder.c_make_encoder = None
    # BUG FIX: restore the global in a finally block — previously an
    # exception anywhere below left json.encoder.c_make_encoder as None
    # for the whole process.
    try:
        markers = {} if self.check_circular else None
        if self.ensure_ascii:
            _encoder = json.encoder.encode_basestring_ascii
        else:
            _encoder = json.encoder.encode_basestring

        def floatstr(o, allow_nan=self.allow_nan, _repr=lambda x: format(x, self._formatter), _inf=float("inf"), _neginf=-float("inf")):
            # Check for specials. Note that this type of test is processor
            # and/or platform-specific, so do tests which don't depend on
            # the internals.
            if o != o:
                text = 'NaN'
            elif o == _inf:
                text = 'Infinity'
            elif o == _neginf:
                text = '-Infinity'
            else:
                return _repr(o)
            if not allow_nan:
                raise ValueError("Out of range float values are not JSON compliant: " + repr(o))
            return text

        if (_one_shot and json.encoder.c_make_encoder is not None and self.indent is None):
            _iterencode = json.encoder.c_make_encoder(markers, self.default, _encoder, self.indent, self.key_separator, self.item_separator, self.sort_keys, self.skipkeys, self.allow_nan)
        else:
            _iterencode = json.encoder._make_iterencode(markers, self.default, _encoder, self.indent, floatstr, self.key_separator, self.item_separator, self.sort_keys, self.skipkeys, _one_shot)
    finally:
        json.encoder.c_make_encoder = c_make_encoder_original
    return _iterencode(o, 0)
def _create_entry(self, location, element, unique=True, delete_element=False):
    """Create an entry located at ``location``.

    Args:
        location: String or :class:`LocationDescriptor` describing a
            "separator location" (i.e. dir1/dir2/dir3 for instance).
        element: Element to store at the location.
        unique: ``True`` means the element **must** be unique and the
            corresponding node doesn't already exist.
        delete_element: When the element need not be unique, whether to
            delete the existing element at ``location`` before replacing it.

    Returns:
        The created node with the element.

    Raises:
        RuntimeError: If the leaf node already exists and ``unique`` is True.

    Note:
        Non-existing linking nodes (i.e. non-leaf nodes) are created on the fly.
    """
    loc_descriptor = self._get_location_descriptor(location)
    # find parent node
    parent_node = self._root_node
    if loc_descriptor.nbr_of_sub_locations() > 1:
        # walk/create intermediate nodes down to the parent of the leaf
        parent_node = self._get_node(loc_descriptor.get_sub_location_descriptor(), create_non_existing_nodes=True)
    # find child node if it exist
    last_location = loc_descriptor.last_sub_location()
    child_node = parent_node.get_child_node_or_default(last_location, None)
    if child_node is None:
        # create node
        child_node = TreeMapNode(element)
        parent_node.set_child_node(last_location, child_node)
        self._nbr_of_nodes += 1
    else:
        # child node exist
        if unique:
            raise RuntimeError("Node corresponding to the location '%s' already exist!" % loc_descriptor.to_string())
        elif delete_element:
            child_node.delete_element()
        # replace the element on the existing node
        child_node.set_element(element)
    return child_node
def _insert_manifest_item(configurator, key, item):
    """Insert an item in the list of an existing manifest key.

    No-op when ``item`` is already present in the list under ``key``.
    """
    with _open_manifest(configurator) as f:
        manifest = f.read()
    if item in ast.literal_eval(manifest).get(key, []):
        return
    pattern = """(["']{}["']:\\s*\\[)""".format(key)
    repl = """\\1\n '{}',""".format(item)
    # BUG FIX: re.sub's 4th positional argument is ``count``, not ``flags``;
    # passing re.MULTILINE positionally silently meant count=8.
    manifest = re.sub(pattern, repl, manifest, flags=re.MULTILINE)
    with _open_manifest(configurator, "w") as f:
        f.write(manifest)
def _set(self):
    """Return a mapping-like proxy for getting/setting named sets."""
    outer = self

    class Sets(object):
        # Item access delegates to the owning object's set accessors.
        def __getitem__(self, name):
            return outer.getSet(name)

        def __setitem__(self, name, values):
            outer.getSet(name).setValues(values)

        def __iter__(self):
            return outer.getSets()

    return Sets()
def from_file(filename):
    """Parse cookie data from a text file in HTTP header format.

    Header groups are separated by blank lines; each non-empty group is
    handed to ``from_headers`` joined with CRLF.

    @return: list of tuples (headers, scheme, host, path)
    """
    entries = []
    lines = []
    with open(filename) as fd:
        # Iterate the file lazily instead of materializing it with
        # readlines() — same behavior, no whole-file list.
        for line in fd:
            line = line.rstrip()
            if not line:
                if lines:
                    entries.append(from_headers("\r\n".join(lines)))
                lines = []
            else:
                lines.append(line)
    # flush a trailing group that is not followed by a blank line
    if lines:
        entries.append(from_headers("\r\n".join(lines)))
    return entries
def emboss_pepstats_parser(infile):
    """Get dictionary of pepstats results.

    Args:
        infile: Path to pepstats outfile.

    Returns:
        dict: keys ``mol_percent_<property>-pepstats`` -> fraction (0..1).

    TODO:
        Only currently parsing the bottom of the file for percentages of
        properties (fixed line positions 39-47 of the report).
    """
    with open(infile) as f:
        lines = f.read().split('\n')
    info_dict = {}
    # The property-percentage table occupies fixed lines of the report.
    for raw in lines[38:47]:
        fields = [x for x in raw.split('\t') if x != '']
        prop = fields[0]
        # (removed unused local binding of the count column)
        percent = float(fields[-1]) / 100.0
        info_dict['mol_percent_' + prop.lower() + '-pepstats'] = percent
    return info_dict
def getHostCertPath(self, name):
    '''Gets the path to a host certificate.

    Args:
        name (str): The name of the host keypair.

    Examples:
        Get the path to the host certificate for the host "myhost":

            mypath = cdir.getHostCertPath('myhost')

    Returns:
        str: The path if it exists, otherwise None.
    '''
    path = s_common.genpath(self.certdir, 'hosts', '%s.crt' % name)
    return path if os.path.isfile(path) else None
def mean_sq_jump_dist(self, discard_frac=0.1):
    """Mean squared jumping distance estimated from the chain.

    Parameters
    ----------
    discard_frac : float
        Fraction of iterations to discard at the beginning (burn-in).

    Returns
    -------
    float
    """
    burn_in = int(self.niter * discard_frac)
    return msjd(self.chain.theta[burn_in:])
def set_size(self, size):
    """Changes the file size.

    in size of type int
        The new file size.

    raises :class:`OleErrorNotimpl`
        The method is not implemented yet.
    """
    # Reject non-integer sizes before issuing the remote call.
    if not isinstance(size, baseinteger):
        raise TypeError("size can only be an instance of type baseinteger")
    self._call("setSize", in_p=[size])
def fast(self):
    """Access the 'fast' dimension.

    Yields iline or xline mode depending on which one has the linear
    (faster) disk layout. Use this when the inline/crossline distinction
    matters less than traversing the whole file line-by-line efficiently.

    Returns
    -------
    fast : Line
        line addressing mode

    Raises
    ------
    RuntimeError
        If the sorting is neither inline nor crossline.

    Notes
    -----
    .. versionadded:: 1.1
    """
    sorting = self.sorting
    if sorting == TraceSortingFormat.INLINE_SORTING:
        return self.iline
    if sorting == TraceSortingFormat.CROSSLINE_SORTING:
        return self.xline
    raise RuntimeError("Unknown sorting.")
def ReadClientPostingLists(self, keywords):
    """Looks up all clients associated with any of the given keywords.

    Args:
        keywords: A list of keywords we are interested in.

    Returns:
        A dict mapping each keyword to a list of matching clients.
    """
    # Keyword analysis also yields the earliest timestamp to search from.
    start_time, filtered = self._AnalyzeKeywords(keywords)
    return data_store.REL_DB.ListClientsForKeywords(filtered, start_time=start_time)
def fake_exc_info(exc_info, filename, lineno):
    """Helper for `translate_exception`.

    Re-raises the given exception from a synthesized code object so the
    traceback appears to originate at (filename, lineno) of the template
    rather than inside the compiled module. Python 2 only (uses
    ``iteritems`` and the ``exec ... in`` statement).
    """
    exc_type, exc_value, tb = exc_info
    # figure the real context out
    if tb is not None:
        real_locals = tb.tb_frame.f_locals.copy()
        ctx = real_locals.get('context')
        if ctx:
            locals = ctx.get_all()
        else:
            locals = {}
        # template-local variables are stored with an 'l_' prefix
        for name, value in real_locals.iteritems():
            if name.startswith('l_') and value is not missing:
                locals[name[2:]] = value
        # if there is a local called __jinja_exception__, we get
        # rid of it to not break the debug functionality.
        locals.pop('__jinja_exception__', None)
    else:
        locals = {}
    # assemble fake globals we need
    globals = {
        '__name__': filename,
        '__file__': filename,
        '__jinja_exception__': exc_info[:2],
        # we don't want to keep the reference to the template around
        # to not cause circular dependencies, but we mark it as Jinja
        # frame for the ProcessedTraceback
        '__jinja_template__': None
    }
    # and fake the exception: pad with newlines so the raise lands on
    # the requested template line number
    code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec')
    # if it's possible, change the name of the code. This won't work
    # on some python environments such as google appengine
    try:
        if tb is None:
            location = 'template'
        else:
            function = tb.tb_frame.f_code.co_name
            if function == 'root':
                location = 'top-level template code'
            elif function.startswith('block_'):
                location = 'block "%s"' % function[6:]
            else:
                location = 'template'
        # rebuild the code object with the friendly location as its name
        code = CodeType(0, code.co_nlocals, code.co_stacksize, code.co_flags, code.co_code, code.co_consts, code.co_names, code.co_varnames, filename, location, code.co_firstlineno, code.co_lnotab, (), ())
    except:
        pass
    # execute the code and catch the new traceback
    try:
        exec code in globals, locals
    except:
        exc_info = sys.exc_info()
        new_tb = exc_info[2].tb_next
    # return without this frame
    return exc_info[:2] + (new_tb,)
def generate_one(basename, xml):
    '''Generate C headers for one MAVLink XML definition file.

    Writes the per-dialect headers (mavlink.h, version.h, the main dialect
    header, one header per message, and the testsuite header) into
    ``basename/<xml.basename>``, after decorating ``xml`` and its messages
    with the string fragments the templates need.
    '''
    directory = os.path.join(basename, xml.basename)
    print("Generating C implementation in directory %s" % directory)
    mavparse.mkdir_p(directory)
    # translate the XML-level flags into C preprocessor defines
    if xml.little_endian:
        xml.mavlink_endian = "MAVLINK_LITTLE_ENDIAN"
    else:
        xml.mavlink_endian = "MAVLINK_BIG_ENDIAN"
    if xml.crc_extra:
        xml.crc_extra_define = "1"
    else:
        xml.crc_extra_define = "0"
    if xml.command_24bit:
        xml.command_24bit_define = "1"
    else:
        xml.command_24bit_define = "0"
    if xml.sort_fields:
        xml.aligned_fields_define = "1"
    else:
        xml.aligned_fields_define = "0"
    # work out the included headers
    xml.include_list = []
    for i in xml.include:
        # strip the '.xml' suffix from each include name
        base = i[:-4]
        xml.include_list.append(mav_include(base))
    # form message lengths array (only meaningful for 8-bit message ids)
    xml.message_lengths_array = ''
    if not xml.command_24bit:
        for msgid in range(256):
            mlen = xml.message_min_lengths.get(msgid, 0)
            xml.message_lengths_array += '%u, ' % mlen
        # drop the trailing ', '
        xml.message_lengths_array = xml.message_lengths_array[:-2]
    # and message CRCs array
    xml.message_crcs_array = ''
    if xml.command_24bit:
        # we sort with primary key msgid
        for msgid in sorted(xml.message_crcs.keys()):
            xml.message_crcs_array += '{%u, %u, %u, %u, %u, %u}, ' % (msgid, xml.message_crcs[msgid], xml.message_min_lengths[msgid], xml.message_flags[msgid], xml.message_target_system_ofs[msgid], xml.message_target_component_ofs[msgid])
    else:
        for msgid in range(256):
            crc = xml.message_crcs.get(msgid, 0)
            xml.message_crcs_array += '%u, ' % crc
    xml.message_crcs_array = xml.message_crcs_array[:-2]
    # form message info array
    xml.message_info_array = ''
    if xml.command_24bit:
        # we sort with primary key msgid
        for msgid in sorted(xml.message_names.keys()):
            name = xml.message_names[msgid]
            xml.message_info_array += 'MAVLINK_MESSAGE_INFO_%s, ' % name
    else:
        for msgid in range(256):
            name = xml.message_names.get(msgid, None)
            if name is not None:
                xml.message_info_array += 'MAVLINK_MESSAGE_INFO_%s, ' % name
            else:
                # Several C compilers don't accept {NULL} for
                # multi-dimensional arrays and structs
                # feed the compiler a "filled" empty message
                xml.message_info_array += '{"EMPTY",0,{{"","",MAVLINK_TYPE_CHAR,0,0,0}}}, '
    xml.message_info_array = xml.message_info_array[:-2]
    # add some extra field attributes for convenience with arrays
    for m in xml.message:
        m.msg_name = m.name
        if xml.crc_extra:
            m.crc_extra_arg = ", %s" % m.crc_extra
        else:
            m.crc_extra_arg = ""
        for f in m.fields:
            if f.print_format is None:
                f.c_print_format = 'NULL'
            else:
                f.c_print_format = '"%s"' % f.print_format
            if f.array_length != 0:
                # array field: getters take a destination pointer and
                # return the element count as uint16_t
                f.array_suffix = '[%u]' % f.array_length
                f.array_prefix = '*'
                f.array_tag = '_array'
                f.array_arg = ', %u' % f.array_length
                f.array_return_arg = '%s, %u, ' % (f.name, f.array_length)
                f.array_const = 'const '
                f.decode_left = ''
                f.decode_right = ', %s->%s' % (m.name_lower, f.name)
                f.return_type = 'uint16_t'
                f.get_arg = ', %s *%s' % (f.type, f.name)
                if f.type == 'char':
                    f.c_test_value = '"%s"' % f.test_value
                else:
                    test_strings = []
                    for v in f.test_value:
                        test_strings.append(str(v))
                    f.c_test_value = '{ %s }' % ', '.join(test_strings)
            else:
                # scalar field: decoded by direct assignment
                f.array_suffix = ''
                f.array_prefix = ''
                f.array_tag = ''
                f.array_arg = ''
                f.array_return_arg = ''
                f.array_const = ''
                f.decode_left = "%s->%s = " % (m.name_lower, f.name)
                f.decode_right = ''
                f.get_arg = ''
                f.return_type = f.type
                if f.type == 'char':
                    f.c_test_value = "'%s'" % f.test_value
                elif f.type == 'uint64_t':
                    # 64-bit literals need the ULL/LL suffix in C
                    f.c_test_value = "%sULL" % f.test_value
                elif f.type == 'int64_t':
                    f.c_test_value = "%sLL" % f.test_value
                else:
                    f.c_test_value = f.test_value
    # cope with uint8_t_mavlink_version
    for m in xml.message:
        m.arg_fields = []
        m.array_fields = []
        m.scalar_fields = []
        for f in m.ordered_fields:
            if f.array_length != 0:
                m.array_fields.append(f)
            else:
                m.scalar_fields.append(f)
        for f in m.fields:
            if not f.omit_arg:
                m.arg_fields.append(f)
                f.putname = f.name
            else:
                # omitted arguments are emitted as their constant value
                f.putname = f.const_value
    # finally emit all the headers for this dialect
    generate_mavlink_h(directory, xml)
    generate_version_h(directory, xml)
    generate_main_h(directory, xml)
    for m in xml.message:
        generate_message_h(directory, m)
    generate_testsuite_h(directory, xml)
def signrawtransaction(self, rawtxhash, parent_tx_outputs=None, private_key=None):
    """Sign a raw transaction; returns status and rawtxhash.

    :param rawtxhash: serialized transaction (hex)
    :param parent_tx_outputs: outputs being spent by this transaction
    :param private_key: a private key to sign this transaction with
    """
    params = [rawtxhash]
    if parent_tx_outputs or private_key:
        # the extended form passes the spent outputs and signing key too
        params = [rawtxhash, parent_tx_outputs, private_key]
    return self.req("signrawtransaction", params)
def read_static_uplink(self):
    """Read the static uplink for this host from the configured lists, if any."""
    if self.node_list is None or self.node_uplink_list is None:
        return
    nodes = self.node_list.split(',')
    ports = self.node_uplink_list.split(',')
    # the two comma-separated lists are positionally paired
    for node, port in zip(nodes, ports):
        if node.strip() != self.host_name:
            continue
        self.static_uplink = True
        self.static_uplink_port = port.strip()
        return
async def SetPassword(self, changes):
    '''changes : typing.Sequence[~EntityPassword]
    Returns -> typing.Sequence[~ErrorResult]
    '''
    # build the rpc message for the UserManager facade
    msg = dict(type='UserManager', request='SetPassword', version=1, params={'changes': changes})
    return await self.rpc(msg)
def close(self):
    """Close any open connections to Redis.

    :raises: :exc:`tredis.exceptions.ConnectionError` when not connected
    """
    if not self._connected.is_set():
        raise exceptions.ConnectionError('not connected')
    self._closing = True
    if self._clustering:
        # close every per-host connection in the cluster map
        for host in self._cluster:
            self._cluster[host].close()
    elif self._connection:
        self._connection.close()
def connect(self):
    """Open the connection via the base class, then set TCP_NODELAY on the socket."""
    HTTPConnection.connect(self)
    # Disable Nagle's algorithm so small requests are sent immediately
    # instead of being coalesced.
    self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
def create(self, ignore_warnings=None):
    """Create this AppProfile.

    .. note::
        Uses the ``instance`` and ``app_profile_id`` on the current
        :class:`AppProfile` in addition to ``routing_policy_type``,
        ``description``, ``cluster_id`` and ``allow_transactional_writes``.
        Reset those attributes before calling :meth:`create` to change them.

    :type: ignore_warnings: bool
    :param: ignore_warnings: (Optional) If true, ignore safety checks when
                             creating the AppProfile.
    """
    # issue the admin API call, then wrap the returned protobuf
    created_pb = self.instance_admin_client.create_app_profile(
        parent=self._instance.name,
        app_profile_id=self.app_profile_id,
        app_profile=self._to_pb(),
        ignore_warnings=ignore_warnings,
    )
    return self.from_pb(created_pb, self._instance)
def Cross(width=3, color=0):
    """Draws a cross centered in the target area.

    :param width: width of the lines of the cross in pixels
    :type width: int
    :param color: color of the lines of the cross
    :type color: pygame.Color
    """
    horizontal = Line("h", width, color)
    vertical = Line("v", width, color)
    return Overlay(horizontal, vertical)
def check_response(self, response):
    """Raise D4S2Error unless the response is successful (2xx).

    :param response: requests.Response response to be checked
    """
    status = response.status_code
    if status == 401:
        raise D4S2Error(UNAUTHORIZED_MESSAGE)
    if status < 200 or status >= 300:
        raise D4S2Error("Request to {} failed with {}:\n{}.".format(response.url, status, response.text))
def delete(self, list_uuid, uuid):
    """Delete one contact from a list and return its former representation."""
    # fetch the record first so we can return it after deletion
    record = self.get(list_uuid, uuid)
    url = "%(base)s/%(list_uuid)s/contacts/%(uuid)s" % {'base': self.local_base_url, 'list_uuid': list_uuid, 'uuid': uuid}
    self.core.delete(url)
    return record
def install_requirements(self):
    """Install OS-level build requirements for the detected Linux distro.

    Side effects only: shells out (via sudo) to apt/yum and cpanm. The
    Java check probes ``java -version`` and installs a JDK when missing.

    NOTE(review): platform.dist() was removed in Python 3.8 — on newer
    interpreters this raises AttributeError; consider the ``distro`` package.
    """
    print('Installing Requirements')
    print(platform.dist())
    if platform.dist()[0] in ['Ubuntu', 'LinuxMint']:
        command = 'sudo apt-get install -y gcc git python3-dev zlib1g-dev make zip libssl-dev libbz2-dev liblzma-dev libcurl4-openssl-dev build-essential libxml2-dev apache2 zlib1g-dev bcftools build-essential cpanminus curl git libbz2-dev libcurl4-openssl-dev liblocal-lib-perl liblzma-dev libmysqlclient-dev libpng-dev libpq-dev libssl-dev manpages mysql-client openssl perl perl-base pkg-config python3-dev python3-pip python3-setuptools sed tabix unzip vcftools vim wget zlib1g-dev apache2 build-essential cpanminus curl git libmysqlclient-dev libpng-dev libssl-dev locales manpages mysql-client openssl perl perl-base unzip vim wget libgd-dev'
        # lamp-server^
        call(command, shell=True)
        try:
            subprocess.call(['java', '-version'])
        # BUG FIX: was a bare `except:` (would swallow KeyboardInterrupt etc.)
        except Exception:
            command = """sudo apt install -y software-properties-common
sudo add-apt-repository -y ppa:webupd8team/java
sudo apt-get update
echo "oracle-java8-installer shared/accepted-oracle-license-v1-1 select true" | sudo debconf-set-selections
sudo apt-get -y install oracle-java8-installer"""
            call(command, shell=True)
    elif platform.dist()[0] in ['debian']:
        call('sudo apt-get update', shell=True)
        command = 'sudo apt-get install -y libmodule-install-perl apache2 bcftools build-essential cpanminus curl git libbz2-dev libcurl4-openssl-dev liblocal-lib-perl liblzma-dev default-libmysqlclient-dev libpng-dev libpq-dev libssl-dev manpages mysql-client openssl perl perl-base pkg-config python3-dev python3-pip python3-setuptools sed tabix unzip vcftools vim wget zlib1g-dev apache2 build-essential cpanminus curl git libpng-dev libssl-dev locales manpages mysql-client openssl perl perl-base unzip vim wget libgd-dev libxml-libxml-perl libgd-dev'
        # lamp-server^
        call(command, shell=True)
        call('sudo apt-get install -y default-jre default-jdk', shell=True)
    elif platform.dist()[0] in ['redhat', 'centos']:
        call('sudo yum install libcurl-devel sed vcftools bcftools tabix zlib-devel postgresql96-libs perl-local-lib perl-App-cpanminus curl unzip wget', shell=True)
        call("""sudo yum groupinstall 'Development Tools'""", shell=True)
        call("""sudo yum install gcc gcc-c++ make openssl-devel""", shell=True)
        try:
            subprocess.call(['java', '-version'])
        # BUG FIX: was a bare `except:`
        except Exception:
            call("sudo yum install -y java-1.8.0-openjdk", shell=True)
    # Perl Requirements (distro-independent)
    call("sudo cpanm DBI DBD::mysql File::Copy::Recursive Archive::Extract Archive::Zip LWP::Simple Bio::Root::Version LWP::Protocol::https Bio::DB::Fasta CGI Test::utf8 Test::File inc::Module::Install XML::DOM::XPath XML::LibXML", shell=True)
    call("sudo cpanm --local-lib=~/perl5 local::lib && eval $(perl -I ~/perl5/lib/perl5/ -Mlocal::lib)", shell=True)
def additional_assets(context: Context):
    """Collect assets from the GOV.UK frontend toolkit into the build dir."""
    rsync_flags = '-avz' if context.verbosity == 2 else '-az'
    for asset_path in context.app.additional_asset_paths:
        context.shell('rsync %s %s %s/' % (rsync_flags, asset_path, context.app.asset_build_path))
def persistentRegisterInspector(fullName, fullClassName, pythonPath=''):
    """Register an inspector and persist the registration.

    Loads (or initialises) the inspector registry, adds the inspector and
    writes the settings back.
    Important: instantiate a Qt application first so the correct settings
    file / registry hive is used.
    """
    reg = InspectorRegistry()
    reg.loadOrInitSettings()
    reg.registerInspector(fullName, fullClassName, pythonPath=pythonPath)
    reg.saveSettings()
def parse(self, data):
    # type: (bytes) -> None
    '''Parse raw bytes into this UDF ICB Tag.

    Parameters:
     data - The data to parse.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('UDF ICB Tag already initialized')
    fields = struct.unpack_from(self.FMT, data, 0)
    (self.prior_num_direct_entries, self.strategy_type, self.strategy_param,
     self.max_num_entries, reserved, self.file_type,
     self.parent_icb_log_block_num, self.parent_icb_part_ref_num,
     self.flags) = fields
    # Only strategy types 4 and 4096 are accepted here.
    if self.strategy_type not in (4, 4096):
        raise pycdlibexception.PyCdlibInvalidISO('UDF ICB Tag invalid strategy type')
    if reserved != 0:
        raise pycdlibexception.PyCdlibInvalidISO('UDF ICB Tag reserved not 0')
    self._initialized = True
def execute(command, return_output=True, log_file=None, log_settings=None,
            error_logfile=None, timeout=None, line_function=None,
            poll_timing=0.01, logger=None, working_folder=None, env=None):
    """Execute a program and log its standard output into a file.

    :param return_output: if True return the captured STDOUT, otherwise return the return code
    :param log_file: path where the log file should be written (a temp file is used if not set)
    :param log_settings: optional dict providing ``LOG_FOLDER``; when absent a temp folder is created
    :param error_logfile: path where the error log should be written (merged into log_file if not set)
    :param timeout: if set, kill the subprocess after ``timeout`` seconds and raise an Exception
    :param line_function: callable invoked with each new line written to the log
    :param poll_timing: wait time between timeout checks and stdout checks
    :param logger: logger to use (defaults to the 'command_execute' logger)
    :param working_folder: working directory for the subprocess
    :param env: environment mapping for the subprocess
    :returns: standard output of the command, or its return code when return_output is False
    """
    tmp_log = False
    if log_settings:
        log_folder = log_settings.get('LOG_FOLDER')
    else:
        tmp_log = True
        log_folder = tempfile.mkdtemp()
    if not log_file:
        log_file = os.path.join(log_folder, "commands",
                                "execute-command-logfile-%s.log" % UUID.uuid4())
    try:
        if not os.path.isdir(os.path.join(log_folder, "commands")):
            os.makedirs(os.path.join(log_folder, "commands"))
    except OSError:
        # Folder may already exist or be created concurrently; open() below
        # will surface any genuine problem.
        pass
    if not logger:
        logger = logging.getLogger('command_execute')
    logfile_writer = open(log_file, 'a')
    header = "%s - Executing command (timeout=%s) :\n\t%s\n\n\n" % (
        datetime.now().isoformat(), timeout, command)
    logfile_writer.write(header)
    logfile_writer.flush()
    # Separate read handle positioned after the header, so the returned
    # output contains only what the command itself wrote.
    logfile_reader = open(log_file, 'rb')
    logfile_reader.seek(0, os.SEEK_END)
    logfile_start_position = logfile_reader.tell()
    if error_logfile:
        err_logfile_writer = open(error_logfile, 'a')
    else:
        err_logfile_writer = logfile_writer
    start = datetime.now()
    timeout_string = ""
    if timeout:
        timeout_string = "(timeout=%s)" % timeout
    logger.info(u"Executing command %s :\n\t\t%s" % (timeout_string, command))
    # We use "exec <command>" as Popen launches a shell that runs the command.
    # "exec" replaces the intermediate "sh" with the command executable itself,
    # so killing the child PID kills the command and not just the shell.
    if sys.platform != 'win32':
        command = u"exec %s" % text_utils.uni(command)
    process = subprocess.Popen(command, stdout=logfile_writer,
                               stderr=err_logfile_writer, bufsize=1,
                               shell=True, cwd=working_folder, env=env)
    while process.poll() is None:
        # Avoid unnecessary CPU usage: sleep between polls (default 0.01s).
        time.sleep(poll_timing)
        # Timeout check. Use total_seconds() -- the original used .seconds,
        # which is only the seconds *component* and wraps after one day.
        if timeout is not None:
            now = datetime.now()
            if (now - start).total_seconds() > timeout:
                os.kill(process.pid, signal.SIGKILL)
                os.waitpid(-1, os.WNOHANG)
                raise Exception("Command execution timed out (took more than %s seconds...)" % timeout)
        # Line function call:
        # => if line_function is defined, we call it on each new line of the file.
        if line_function:
            o = text_utils.uni(logfile_reader.readline()).rstrip()
            while o != '':
                line_function(o)
                o = text_utils.uni(logfile_reader.readline()).rstrip()
    if not return_output:
        # Return result code and ensure we have waited for the end of sub process
        return process.wait()
    # Back to where the command's own output starts.
    logfile_reader.seek(logfile_start_position, os.SEEK_SET)
    res = text_utils.uni(logfile_reader.read())
    try:
        logfile_reader.close()
        logfile_writer.close()
        err_logfile_writer.close()
        if tmp_log:
            shutil.rmtree(log_folder, ignore_errors=True)
    except Exception:
        logger.exception("Error while cleaning after tbx.execute() call.")
    return res
def cosine_similarity_vec(num_tokens, num_removed_vec):
    """Cosine similarity between an all-ones binary vector of length
    ``num_tokens`` and vectors of the same length with ``num_removed_vec``
    elements set to zero.
    """
    kept = num_tokens - np.array(num_removed_vec)
    # The 1e-6 epsilon keeps the denominator non-zero for degenerate inputs.
    return kept / (np.sqrt(num_tokens + 1e-6) * np.sqrt(kept + 1e-6))
def furnish(app: web.Application):
    """Configure application routes, readying the app for running.

    Modifies routes and resources that were added by calling code, and must
    be called immediately prior to ``run(app)``.

    Args:
        app (web.Application):
            The aiohttp Application as created by ``create_app()``
    """
    app_name = app['config']['name']
    prefix = '/' + app_name.lstrip('/')
    app.router.add_routes(routes)
    cors_middleware.enable_cors(app)
    # Prefix every resource exactly once -- several routes may share a resource.
    seen = set()
    for route in list(app.router.routes()):
        resource = route.resource
        if resource in seen:
            continue
        seen.add(resource)
        resource.add_prefix(prefix)
    # Swagger UI is served under the same prefix, set explicitly here.
    aiohttp_swagger.setup_swagger(app,
                                  swagger_url=prefix + '/api/doc',
                                  description='',
                                  title=f'Brewblox Service "{app_name}"',
                                  api_version='0.0',
                                  contact='development@brewpi.com')
    LOGGER.info('Service info: ' + getenv('SERVICE_INFO', 'UNKNOWN'))
    for route in app.router.routes():
        LOGGER.info(f'Endpoint [{route.method}] {route.resource}')
    for name, impl in app.get(features.FEATURES_KEY, {}).items():
        LOGGER.info(f'Feature [{name}] {impl}')
def s3_get(url: str, temp_file: IO) -> None:
    """Download the object at *url* directly from S3 into *temp_file*."""
    bucket_name, key = split_s3_path(url)
    bucket = boto3.resource("s3").Bucket(bucket_name)
    bucket.download_fileobj(key, temp_file)
def from_config(cls, obj, selectable, ingredient_constructor=ingredient_from_validated_dict, metadata=None):
    """Create a shelf using a dict shelf definition.

    :param obj: A Python dictionary describing a Shelf.
    :param selectable: A SQLAlchemy Table, a Recipe, a table name, or a
        SQLAlchemy join to select from.
    :param metadata: If `selectable` is passed as a table name, the
        SQLAlchemy MetaData object used to introspect its schema.
    :return: A shelf that contains the ingredients defined in obj.
    """
    from recipe import Recipe
    if isinstance(selectable, Recipe):
        selectable = selectable.subquery()
    elif isinstance(selectable, basestring):
        # "schema.table" names carry an explicit schema; plain names do not.
        if '.' in selectable:
            schema, tablename = selectable.split('.')
        else:
            schema, tablename = None, selectable
        selectable = Table(tablename, metadata, schema=schema,
                           extend_existing=True, autoload=True)
    ingredients = {key: ingredient_constructor(value, selectable)
                   for key, value in iteritems(obj)}
    return cls(ingredients, select_from=selectable)
def _add_gene_to_graph(self, gene, variant_bnode, gene_id, relation):
    """Link *variant_bnode* to a gene via *relation*.

    Uses *gene_id* when the gene is mapped to NCBI; otherwise a blank node
    is created for the bare gene symbol.

    :param gene: gene symbol (may be falsy)
    :param variant_bnode: blank node for the variant
    :param gene_id: mapped NCBI gene identifier, or falsy
    :param relation: predicate linking variant to gene
    :return: None
    """
    model = Model(self.graph)
    if gene_id:
        self.graph.addTriple(variant_bnode, relation, gene_id)
    elif gene:
        LOG.info("gene %s not mapped to NCBI gene, making blank node", gene)
        blank_gene = self.make_id("{0}".format(gene), "_")
        model.addIndividualToGraph(blank_gene, gene)
        self.graph.addTriple(variant_bnode, relation, blank_gene)
def text(self, x, y, txt=''):
    "Output a string"
    txt = self.normalize_text(txt)
    if self.unifontsubset:
        encoded = self._escape(UTF8ToUTF16BE(txt, False))
        # Track every code point so the font subset includes the glyphs used.
        for code_point in UTF8StringToArray(txt):
            self.current_font['subset'].append(code_point)
    else:
        encoded = self._escape(txt)
    # PDF text object: set position (user units -> points via self.k) and show text.
    s = sprintf('BT %.2f %.2f Td (%s) Tj ET', x * self.k, (self.h - y) * self.k, encoded)
    if self.underline and txt != '':
        s += ' ' + self._dounderline(x, y, txt)
    if self.color_flag:
        # Save/restore graphics state around the fill-colour change.
        s = 'q ' + self.text_color + ' ' + s + ' Q'
    self._out(s)
def reduce_(self):
    r"""Return a degree-reduced version of the current curve.

    .. _pseudo-inverse:
        https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_pseudoinverse

    Does this by converting the current nodes :math:`v_0, \ldots, v_n`
    to new nodes :math:`w_0, \ldots, w_{n - 1}` that correspond to
    reversing the :meth:`elevate` process.

    This uses the `pseudo-inverse`_ of the elevation matrix. For example
    when elevating from degree 2 to 3, the matrix :math:`E_2` is given by

    .. math::

       \mathbf{v} = \left[\begin{array}{c c c} v_0 & v_1 & v_2
           \end{array}\right] \longmapsto \left[\begin{array}{c c c c}
           v_0 & \frac{v_0 + 2 v_1}{3} & \frac{2 v_1 + v_2}{3} & v_2
           \end{array}\right] = \frac{1}{3} \mathbf{v}
           \left[\begin{array}{c c c c} 3 & 1 & 0 & 0 \\
           0 & 2 & 2 & 0 \\ 0 & 0 & 1 & 3 \end{array}\right]

    and the (right) pseudo-inverse is given by

    .. math::

       R_2 = E_2^T \left(E_2 E_2^T\right)^{-1} = \frac{1}{20}
           \left[\begin{array}{c c c} 19 & -5 & 1 \\
           3 & 15 & -3 \\ -3 & 15 & 3 \\ 1 & -5 & 19
           \end{array}\right].

    .. warning::

       Though degree-elevation preserves the start and end nodes, degree
       reduction has no such guarantee. Rather, the nodes produced are
       "best" in the least squares sense (when solving the normal
       equations).

    .. image:: ../../images/curve_reduce.png
       :align: center

    .. testsetup:: curve-reduce, curve-reduce-approx

       import numpy as np
       import bezier

    .. doctest:: curve-reduce
       :options: +NORMALIZE_WHITESPACE

       >>> nodes = np.asfortranarray([
       ...     [-3.0, 0.0, 1.0, 0.0],
       ...     [ 3.0, 2.0, 3.0, 6.0],
       ... ])
       >>> curve = bezier.Curve(nodes, degree=3)
       >>> reduced = curve.reduce_()
       >>> reduced
       <Curve (degree=2, dimension=2)>
       >>> reduced.nodes
       array([[-3. ,  1.5,  0. ],
              [ 3. ,  1.5,  6. ]])

    .. testcleanup:: curve-reduce

       import make_images
       make_images.curve_reduce(curve, reduced)

    In the case that the current curve **is not** degree-elevated.

    .. image:: ../../images/curve_reduce_approx.png
       :align: center

    .. doctest:: curve-reduce-approx
       :options: +NORMALIZE_WHITESPACE

       >>> nodes = np.asfortranarray([
       ...     [0.0, 1.25, 3.75, 5.0],
       ...     [2.5, 5.0 , 7.5 , 2.5],
       ... ])
       >>> curve = bezier.Curve(nodes, degree=3)
       >>> reduced = curve.reduce_()
       >>> reduced
       <Curve (degree=2, dimension=2)>
       >>> reduced.nodes
       array([[-0.125,  2.5  ,  5.125],
              [ 2.125,  8.125,  2.875]])

    .. testcleanup:: curve-reduce-approx

       import make_images
       make_images.curve_reduce_approx(curve, reduced)

    Returns:
        Curve: The degree-reduced curve.
    """
    # Least-squares reduction: apply the pseudo-inverse of the elevation
    # matrix to the current nodes (delegated to the curve helpers).
    new_nodes = _curve_helpers.reduce_pseudo_inverse(self._nodes)
    # _copy=False: new_nodes is freshly allocated, so no defensive copy is needed.
    return Curve(new_nodes, self._degree - 1, _copy=False)
def _extract_from_subworkflow ( vs , step ) :
"""Remove internal variable names when moving from sub - workflow to main .""" | substep_ids = set ( [ x . name for x in step . workflow ] )
out = [ ]
for var in vs :
internal = False
parts = var [ "id" ] . split ( "/" )
if len ( parts ) > 1 :
if parts [ 0 ] in substep_ids :
internal = True
if not internal :
var . pop ( "source" , None )
out . append ( var )
return out |
def is_java_project(self):
    """Indicates if the project's main binary is a Java Archive."""
    # Computed lazily on first access, then cached on the instance.
    cached = self._is_java_project
    if cached is None:
        cached = isinstance(self.arch, ArchSoot)
        self._is_java_project = cached
    return cached
def unicode_char(ignored_chars=None):
    """Return a handler that listens for unicode characters."""
    def handler(event):
        # Only key-down events with a character not explicitly ignored are consumed.
        if event.type == pygame.KEYDOWN and (ignored_chars is None or event.unicode not in ignored_chars):
            return event.unicode
        return EventConsumerInfo.DONT_CARE
    return handler
def _main():
    """Command-line program that reads in JSON from stdin and writes out
    pretty-printed messages to stdout.
    """
    if argv[1:]:
        # Any argument (e.g. --help) prints the usage text and exits.
        stdout.write(_CLI_HELP)
        raise SystemExit()
    for line in stdin:
        # stdin yields bytes on Python 2 and str on Python 3; strip with a
        # matching newline type (the original always used b"\n", which
        # raises TypeError for Python 3 str lines).
        newline = b"\n" if isinstance(line, bytes) else u"\n"
        try:
            message = loads(line)
        except ValueError:
            stdout.write("Not JSON: {}\n\n".format(line.rstrip(newline)))
            continue
        if REQUIRED_FIELDS - set(message.keys()):
            stdout.write("Not an Eliot message: {}\n\n".format(line.rstrip(newline)))
            continue
        result = pretty_format(message) + "\n"
        if PY2:
            result = result.encode("utf-8")
        stdout.write(result)
def sql_to_csv(sql, engine, filepath, chunksize=1000, overwrite=False):
    """Export sql result to csv file.

    Streams the whole result set to disk chunk by chunk so only a small
    part of it is held in memory at any time.

    :param sql: :class:`sqlalchemy.sql.selectable.Select` instance.
    :param engine: :class:`sqlalchemy.engine.base.Engine`.
    :param filepath: output csv file path.
    :param chunksize: number of rows written to the csv on each pass.
    :param overwrite: bool; when True, refuse to clobber an existing file.
    """
    if overwrite:  # pragma: no cover
        if os.path.exists(filepath):
            raise Exception("'%s' already exists!" % filepath)
    import pandas as pd
    columns = [str(column.name) for column in sql.columns]
    # Keep the file handle open for the whole export: the original closed it
    # right after writing the header, so every chunk write hit a closed file.
    with open(filepath, "w") as f:
        # write header
        pd.DataFrame([], columns=columns).to_csv(f, header=True, index=False)
        # iterate over the (potentially big) database table
        result_proxy = engine.execute(sql)
        while True:
            data = result_proxy.fetchmany(chunksize)
            if not data:
                break
            pd.DataFrame(data, columns=columns).to_csv(f, header=False, index=False)
def base62_decode(string):
    """Decode a base-62 encoded string into a number.

    Arguments:
    - `string`: The encoded string

    Digits are drawn from 0-9, a-z, A-Z, most significant digit first.
    Adapted from: http://stackoverflow.com/a/1119769/1144479
    """
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    base = len(alphabet)
    value = 0
    # Horner's rule: equivalent to summing digit * base**position.
    for char in string:
        value = value * base + alphabet.index(char)
    return int(value)
def _extract_user_info(user):
    """Create a fresh user object carrying selected attributes of *user*.

    A new object is needed to avoid overwriting of e.g. ``user.records``.
    """
    clone = User()
    for attr in ('antennas', 'name', 'night_start', 'night_end', 'weekend', 'home'):
        setattr(clone, attr, getattr(user, attr))
    return clone
def is_gzip_file(abspath):
    """Tell from the file extension whether a path is compressed.

    - *.json: uncompressed, utf-8 encoded json file
    - *.gz / *.zip: compressed, utf-8 encoded json file
    """
    extension = os.path.splitext(abspath.lower())[1]
    return extension in (".gz", ".zip")
def emit_children(self, type='other'):
    '''emit_children

    High-level api: Emit a string presentation of the model.

    Parameters
    ----------
    type : `str`
        Type of model content required. Its value can be 'other', 'rpc', or
        'notification'.

    Returns
    -------
    str
        A string presentation of the model that is very similar to the
        output of 'pyang -f tree'
    '''
    def matches(element, wanted):
        actual = element.get('type')
        if wanted == actual:
            return True
        if wanted in ('rpc', 'notification'):
            return False
        # 'other' excludes rpc and notification subtrees.
        return actual not in ('rpc', 'notification')

    lines = []
    for root in [child for child in self.tree.getchildren() if matches(child, type)]:
        for node in root.iter():
            prefix = self.get_depth_str(node, type=type)
            name_str = self.get_name_str(node)
            line = prefix + name_str
            # The datatype column is aligned using the room the name consumed.
            needs_datatype = (node.get('type') in ('anyxml', 'anydata')
                              or node.get('datatype') is not None
                              or node.get('if-feature') is not None)
            if needs_datatype:
                line += self.get_datatype_str(node, len(name_str))
            lines.append(line)
    return lines
def timer(fun, *a, **k):
    """Define a timer for a rule function, for log and statistic purposes."""
    @wraps(fun)
    def timed(*args, **kwargs):
        # Measure wall-clock time around the wrapped call.
        started = arrow.now()
        result = fun(*args, **kwargs)
        finished = arrow.now()
        print('timer:fun: %s\n start:%s,end:%s, took [%s]' % (str(fun), str(started), str(finished), str(finished - started)))
        return result
    return timed
def roles(self, value):
    """Setter for **self.__roles** attribute.

    :param value: Attribute value.
    :type value: dict
    """
    # NOTE: assert-based validation is stripped under `python -O`; kept to
    # preserve the existing exception contract.
    if value is not None:
        assert type(value) is dict, "'{0}' attribute: '{1}' type is not 'dict'!".format("roles", value)
        for role_key in value:
            assert type(role_key) is Qt.ItemDataRole, "'{0}' attribute: '{1}' type is not 'Qt.ItemDataRole'!".format("roles", role_key)
    self.__roles = value
def html_to_pdf(content, encoding="utf-8", link_callback=fetch_resources, **kwargs):
    """Converts html ``content`` into PDF document.

    :param unicode content: html content
    :returns: PDF content
    :rtype: :class:`bytes`
    :raises: :exc:`~easy_pdf.exceptions.PDFRenderingError`
    """
    source = BytesIO(content.encode(encoding))
    output = BytesIO()
    document = pisa.pisaDocument(source, output, encoding=encoding,
                                 link_callback=link_callback, **kwargs)
    if document.err:
        logger.error("Error rendering PDF document")
        for entry in document.log:
            if entry[0] == xhtml2pdf.default.PML_ERROR:
                logger_x2p.error("line %s, msg: %s, fragment: %s", entry[1], entry[2], entry[3])
        raise PDFRenderingError("Errors rendering PDF", content=content, log=document.log)
    if document.warn:
        for entry in document.log:
            if entry[0] == xhtml2pdf.default.PML_WARNING:
                logger_x2p.warning("line %s, msg: %s, fragment: %s", entry[1], entry[2], entry[3])
    return output.getvalue()
def from_model ( cls , model_name , ** kwargs ) :
"""Define a grid using the specifications of a given model .
Parameters
model _ name : string
Name the model ( see : func : ` get _ supported _ models ` for available
model names ) .
Supports multiple formats ( e . g . , ' GEOS5 ' , ' GEOS - 5 ' or ' GEOS _ 5 ' ) .
* * kwargs : string
Parameters that override the model or default grid
settings ( See Other Parameters below ) .
Returns
A : class : ` CTMGrid ` object .
Other Parameters
resolution : ( float , float )
Horizontal grid resolution ( lon , lat ) or ( DI , DJ ) [ degrees ]
Psurf : float
Average surface pressure [ hPa ] ( default : 1013.15)
Notes
Regridded vertical models may have several valid names ( e . g . ,
' GEOS5_47L ' and ' GEOS5 _ REDUCED ' refer to the same model ) .""" | settings = _get_model_info ( model_name )
model = settings . pop ( 'model_name' )
for k , v in list ( kwargs . items ( ) ) :
if k in ( 'resolution' , 'Psurf' ) :
settings [ k ] = v
return cls ( model , ** settings ) |
def V_from_h(h, D, L, horizontal=True, sideA=None, sideB=None, sideA_a=0,
             sideB_a=0, sideA_f=None, sideA_k=None, sideB_f=None, sideB_k=None):
    r'''Calculates partially full volume of a vertical or horizontal tank with
    different head types according to [1]_.

    Parameters
    ----------
    h : float
        Height of the liquid in the tank, [m]
    D : float
        Diameter of the cylindrical section of the tank, [m]
    L : float
        Length of the main cylindrical section of the tank, [m]
    horizontal : bool, optional
        Whether or not the tank is a horizontal or vertical tank
    sideA : string, optional
        The left (or bottom for vertical) head of the tank's type; one of
        [None, 'conical', 'ellipsoidal', 'torispherical', 'guppy', 'spherical'].
    sideB : string, optional
        The right (or top for vertical) head of the tank's type; one of
        [None, 'conical', 'ellipsoidal', 'torispherical', 'guppy', 'spherical'].
    sideA_a : float, optional
        The distance the head as specified by sideA extends down or to the
        left from the main cylindrical section, [m]
    sideB_a : float, optional
        The distance the head as specified by sideB extends up or to the
        right from the main cylindrical section, [m]
    sideA_f : float, optional
        Dish-radius parameter for side A; fD = dish radius [1/m]
    sideA_k : float, optional
        knuckle-radius parameter for side A; kD = knuckle radius [1/m]
    sideB_f : float, optional
        Dish-radius parameter for side B; fD = dish radius [1/m]
    sideB_k : float, optional
        knuckle-radius parameter for side B; kD = knuckle radius [1/m]

    Returns
    -------
    V : float
        Volume up to h [m^3]

    Examples
    --------
    >>> V_from_h(h=7, D=1.5, L=5., horizontal=False, sideA='conical',
    ... sideB='conical', sideA_a=2., sideB_a=1.)
    10.013826583317465

    References
    ----------
    .. [1] Jones, D. "Compute Fluid Volumes in Vertical Tanks." Chemical
       Processing. December 18, 2003.
       http://www.chemicalprocessing.com/articles/2003/193/
    '''
    _valid_heads = (None, 'conical', 'ellipsoidal', 'torispherical', 'spherical', 'guppy')
    # Error messages fixed: the originals were misspelled "Unspoorted".
    if sideA not in _valid_heads:
        raise Exception('Unsupported head type for side A')
    if sideB not in _valid_heads:
        raise Exception('Unsupported head type for side B')
    R = D / 2.
    V = 0
    if horizontal:
        # Each head contributes a partial volume at liquid height h.
        # Conical case
        if sideA == 'conical':
            V += V_horiz_conical(D, L, sideA_a, h, headonly=True)
        if sideB == 'conical':
            V += V_horiz_conical(D, L, sideB_a, h, headonly=True)
        # Ellipsoidal case
        if sideA == 'ellipsoidal':
            V += V_horiz_ellipsoidal(D, L, sideA_a, h, headonly=True)
        if sideB == 'ellipsoidal':
            V += V_horiz_ellipsoidal(D, L, sideB_a, h, headonly=True)
        # Guppy case
        if sideA == 'guppy':
            V += V_horiz_guppy(D, L, sideA_a, h, headonly=True)
        if sideB == 'guppy':
            V += V_horiz_guppy(D, L, sideB_a, h, headonly=True)
        # Spherical case
        if sideA == 'spherical':
            V += V_horiz_spherical(D, L, sideA_a, h, headonly=True)
        if sideB == 'spherical':
            V += V_horiz_spherical(D, L, sideB_a, h, headonly=True)
        # Torispherical case
        if sideA == 'torispherical':
            V += V_horiz_torispherical(D, L, sideA_f, sideA_k, h, headonly=True)
        if sideB == 'torispherical':
            V += V_horiz_torispherical(D, L, sideB_f, sideB_k, h, headonly=True)
        if h > D:  # Must be checked before Af, which would raise a domain error
            raise Exception('Input height is above top of tank')
        # Circular-segment cross-section of the cylindrical shell at height h.
        Af = R ** 2 * acos((R - h) / R) - (R - h) * (2 * R * h - h ** 2) ** 0.5
        V += L * Af
    else:
        # Bottom head (filled up to at most its own height sideA_a).
        if sideA in ('conical', 'ellipsoidal', 'torispherical', 'spherical'):
            if sideA == 'conical':
                V += V_vertical_conical(D, sideA_a, h=min(sideA_a, h))
            if sideA == 'ellipsoidal':
                V += V_vertical_ellipsoidal(D, sideA_a, h=min(sideA_a, h))
            if sideA == 'spherical':
                V += V_vertical_spherical(D, sideA_a, h=min(sideA_a, h))
            if sideA == 'torispherical':
                V += V_vertical_torispherical(D, sideA_f, sideA_k, h=min(sideA_a, h))
        # Cylindrical section
        if h >= sideA_a + L:
            # All middle
            V += pi / 4 * D ** 2 * L
        elif h > sideA_a:
            # Partial middle
            V += pi / 4 * D ** 2 * (h - sideA_a)
        # Top head: full head volume minus the dry part above the liquid.
        if h > sideA_a + L:
            h2 = sideB_a - (h - sideA_a - L)
            if sideB == 'conical':
                V += V_vertical_conical(D, sideB_a, h=sideB_a)
                V -= V_vertical_conical(D, sideB_a, h=h2)
            if sideB == 'ellipsoidal':
                V += V_vertical_ellipsoidal(D, sideB_a, h=sideB_a)
                V -= V_vertical_ellipsoidal(D, sideB_a, h=h2)
            if sideB == 'spherical':
                V += V_vertical_spherical(D, sideB_a, h=sideB_a)
                V -= V_vertical_spherical(D, sideB_a, h=h2)
            if sideB == 'torispherical':
                V += V_vertical_torispherical(D, sideB_f, sideB_k, h=sideB_a)
                V -= V_vertical_torispherical(D, sideB_f, sideB_k, h=h2)
        if h > L + sideA_a + sideB_a:
            raise Exception('Input height is above top of tank')
    return V
def sh_e_out(cls, cmd, **kwargs):
    """Run the command and return its standard output."""
    merged = dict(kwargs)
    # Capture stdout unless the caller explicitly overrode it.
    merged.setdefault('stdout', subprocess.PIPE)
    return cls.sh_e(cmd, **merged)[0]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.