signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def add_subprocess(self, name, proc):
    """Adds a single subprocess to this process.

    :param string name: name of the subprocess
    :param proc: a Process object
    :type proc: :class:`~climlab.process.process.Process`
    :raises: :exc:`ValueError` if ``proc`` is not a process

    :Example:

        Replacing an albedo subprocess by adding a subprocess with
        the same name::

            >>> from climlab.model.ebm import EBM_seasonal
            >>> from climlab.surface.albedo import StepFunctionAlbedo
            >>> ebm_s = EBM_seasonal()
            >>> step_albedo = StepFunctionAlbedo(state=ebm_s.state, **ebm_s.param)
            >>> ebm_s.add_subprocess('albedo', step_albedo)
    """
    if isinstance(proc, Process):
        self.subprocess.update({name: proc})
        # Invalidate the cached process-type classification so it is rebuilt.
        self.has_process_type_list = False
        # Add subprocess diagnostics to parent
        # (if there are no name conflicts)
        # NOTE(review): no name-conflict guard here -- a diagnostic with a
        # clashing name silently overwrites the parent's; confirm intended.
        for diagname, value in proc.diagnostics.items():
            self.add_diagnostic(diagname, value)
    else:
        raise ValueError('subprocess must be Process object')
def active(self):
    """Return the currently active :class:`~opentracing.Scope`, which can
    be used to access the currently active :attr:`Scope.span`.

    :return: the active :class:`~opentracing.Scope`, or ``None`` if not
        available.
    """
    current_task = self._get_task()
    if current_task:
        return self._get_task_scope(current_task)
    # No asyncio task is running: fall back to the base (thread-local) scope.
    return super(AsyncioScopeManager, self).active
def is_required(self, name):
    """Return True iff the schema element with the given name is required."""
    element = self.schema_element(name)
    required = parquet_thrift.FieldRepetitionType.REQUIRED
    return element.repetition_type == required
def create_from_user_input(raw_properties, jamfile_module, location):
    """Creates a property-set from the input given by the user, in the
    context of 'jamfile-module' at 'location'.

    Translates path, indirect and dependency properties relative to the
    Jamfile's location/module before interning the set via ``create``.
    """
    assert is_iterable_typed(raw_properties, basestring)
    assert isinstance(jamfile_module, basestring)
    assert isinstance(location, basestring)
    properties = property.create_from_strings(raw_properties, True)
    properties = property.translate_paths(properties, location)
    properties = property.translate_indirect(properties, jamfile_module)
    # Project id defaults to the absolute project location when the
    # Jamfile declares no explicit 'id' attribute.
    project_id = get_manager().projects().attributeDefault(jamfile_module, 'id', None)
    if not project_id:
        project_id = os.path.abspath(location)
    properties = property.translate_dependencies(properties, project_id, location)
    properties = property.expand_subfeatures_in_conditions(properties)
    return create(properties)
def get_parallel_regions_block(batch):
    """CWL target to retrieve block group of callable regions for parallelization.

    Uses blocking to handle multicore runs.
    """
    samples = [utils.to_single_data(d) for d in batch]
    regions = _get_parallel_regions(samples[0])
    # Currently don't have core information here, so aim for about
    # 10 items per partition.
    block_size = 10
    return [{"region_block": ["%s:%s-%s" % (chrom, start, end)
                              for chrom, start, end in region_block]}
            for region_block in tz.partition_all(block_size, regions)]
def _restart_target(self):
    """Restart our Target."""
    if self._server:
        # Kill the previous server instance if it has not exited yet.
        if self._server.returncode is None:
            self._server.kill()
            time.sleep(0.2)
    # NOTE(review): shell=True with a fixed command string; assumes
    # session_server.py lives in the current working directory. PIPEs are
    # never drained -- a chatty server could block on a full pipe buffer.
    self._server = subprocess.Popen("python session_server.py", stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    # Give the server a moment to start up before fuzzing resumes.
    time.sleep(0.2)
def fontsize(count, maxsize, minsize, maxcount):
    '''A formula for determining font sizes.

    Scales ``count`` linearly against ``maxcount`` into the range
    ``[minsize, maxsize]`` (values below ``minsize`` are clamped).
    '''
    scaled = int(maxsize - (maxsize) * ((float(maxcount - count) / maxcount)))
    return max(minsize, scaled)
def name(self, node, children):
    'name = ~"[a-z]+" _'
    # NOTE(review): the docstring is the grammar rule for this visitor
    # method -- reconstructed from whitespace-mangled source; confirm.
    key = node.text.strip()
    return self.env.get(key, -1)
def add_rule(name, localport, protocol='tcp', action='allow', dir='in', remoteip='any'):
    '''Add a new inbound or outbound rule to the firewall policy

    Args:
        name (str): The name of the rule. Must be unique and cannot be "all".
            Required.
        localport (int): The port the rule applies to. Must be a number between
            0 and 65535. Can be a range. Can specify multiple ports separated
            by commas. Required.
        protocol (Optional[str]): The protocol. Can be a number between 0 and
            255, or one of: icmpv4, icmpv6, tcp, udp, any
        action (Optional[str]): The action the rule performs. Can be any of:
            allow, block, bypass
        dir (Optional[str]): The direction. Can be ``in`` or ``out``.
        remoteip (Optional[str]): The remote IP. Can be any of: any,
            localsubnet, dns, dhcp, wins, defaultgateway, any valid
            IPv4/IPv6 address, subnet or range of IP addresses, or a list
            of valid IP addresses. Combinations of the above may be
            separated by commas.

    Returns:
        dict: A standard state return dictionary
            (``name``/``result``/``changes``/``comment``).

    .. versionadded:: 2016.11.6

    Example:

    .. code-block:: yaml

        open_smb_port:
          win_firewall.add_rule:
            - name: SMB (445)
            - localport: 445
            - protocol: tcp
            - action: allow
    '''
    ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''}
    # Check if rule exists
    if not __salt__['firewall.rule_exists'](name):
        ret['changes'] = {'new rule': name}
    else:
        # Nothing to do: report success with no changes.
        ret['comment'] = 'A rule with that name already exists'
        return ret
    if __opts__['test']:
        # Salt test-mode convention: result is None when a change would be
        # made, True when nothing would change.
        ret['result'] = not ret['changes'] or None
        ret['comment'] = ret['changes']
        ret['changes'] = {}
        return ret
    # Add rule
    try:
        __salt__['firewall.add_rule'](name, localport, protocol, action, dir, remoteip)
    except CommandExecutionError:
        # BUGFIX: a failed add must be reported as result=False and the
        # pending change cleared (previously result stayed True).
        ret['result'] = False
        ret['changes'] = {}
        ret['comment'] = 'Could not add rule'
    return ret
def g_coil(FlowPlant, IDTube, RadiusCoil, Temp):
    """We need a reference for this.

    Karen's thesis likely has this equation and the reference.
    """
    De = dean_number(FlowPlant, IDTube, RadiusCoil, Temp)
    coil_factor = (1 + 0.033 * np.log10(De) ** 4) ** (1 / 2)
    return g_straight(FlowPlant, IDTube).magnitude * coil_factor
def create(self, body, total_fee, auth_code, client_ip=None, out_trade_no=None, detail=None, attach=None, fee_type='CNY', goods_tag=None, device_info=None, limit_pay=None):
    """Micropay (swipe-card payment) API.

    :param device_info: Optional, terminal device number (merchant-defined, e.g. a store number)
    :param body: Description of the goods
    :param detail: Optional, goods details
    :param attach: Optional, attached data echoed back verbatim by the query
        API and payment notifications; mainly used to carry merchant-defined
        order data
    :param client_ip: Optional, client IP for APP/web payments; for Native
        payments, the IP of the machine calling the WeChat Pay API
    :param out_trade_no: Optional, merchant order number; auto-generated by default
    :param total_fee: Total amount, in cents (fen)
    :param fee_type: Optional, ISO 4217 three-letter currency code, default CNY
    :param goods_tag: Optional, goods tag, parameter for voucher/discount features
    :param limit_pay: Optional, restrict payment method; no_credit -- disallow credit cards
    :param auth_code: Authorization code read from the user's WeChat barcode/QR code
    :return: the response data
    """
    now = datetime.now()
    if not out_trade_no:
        # Default order number: <mch_id><YYYYmmddHHMMSS><random>
        out_trade_no = '{0}{1}{2}'.format(self.mch_id, now.strftime('%Y%m%d%H%M%S'), random.randint(1000, 10000))
    data = {'appid': self.appid, 'device_info': device_info, 'body': body, 'detail': detail, 'attach': attach, 'out_trade_no': out_trade_no, 'total_fee': total_fee, 'fee_type': fee_type, 'spbill_create_ip': client_ip or get_external_ip(), 'goods_tag': goods_tag, 'limit_pay': limit_pay, 'auth_code': auth_code, }
    return self._post('pay/micropay', data=data)
def _es_margin(settings):
    """Extract margin formatting related subset of widget settings."""
    margin_keys = (
        ConsoleWidget.SETTING_MARGIN,
        ConsoleWidget.SETTING_MARGIN_LEFT,
        ConsoleWidget.SETTING_MARGIN_RIGHT,
        ConsoleWidget.SETTING_MARGIN_CHAR,
    )
    return {key: settings[key] for key in margin_keys}
def search_tor_node(self, ip):
    """Lookup an IP address to check if it is a known tor exit node.

    :param ip: The IP address to lookup
    :type ip: str
    :return: Data relative to the tor node. If `ip` is a tor exit node
        it will contain a `node` key with the hash of the node and
        a `last_status` key with the last update time of the node.
        If `ip` is not a tor exit node, the function will return an
        empty dictionary.
    :rtype: dict
    """
    data = {}
    tmp = {}
    present = datetime.utcnow().replace(tzinfo=pytz.utc)
    # The raw feed is a sequence of "ExitNode <hash>" / "ExitAddress
    # <ip> <date> <time>" lines; an ExitAddress line closes the record
    # opened by the preceding ExitNode line.
    for line in self._get_raw_data().splitlines():
        params = line.split(' ')
        if params[0] == 'ExitNode':
            tmp['node'] = params[1]
        elif params[0] == 'ExitAddress':
            # Reassemble date+time into an ISO-8601 UTC timestamp.
            tmp['last_status'] = params[2] + 'T' + params[3] + '+0000'
            last_status = parse(tmp['last_status'])
            # Keep only records fresher than self.delta (None = keep all).
            if (self.delta is None or (present - last_status) < self.delta):
                data[params[1]] = tmp
            tmp = {}
        else:
            # Ignore any other line type (e.g. comments, Published lines).
            pass
    return data.get(ip, {})
def printJobChildren(self):
    """Takes a list of jobs, and prints their successors."""
    for job in self.jobsToReport:
        parts = ["CHILDREN_OF_JOB:%s " % job]
        for level, jobList in enumerate(job.stack):
            parts.extend("\t(CHILD_JOB:%s,PRECEDENCE:%i)" % (childJob, level)
                         for childJob in jobList)
        print("".join(parts))
def get(self, sid):
    """Constructs a IncomingPhoneNumberContext

    :param sid: The unique string that identifies the resource

    :returns: twilio.rest.api.v2010.account.incoming_phone_number.IncomingPhoneNumberContext
    :rtype: twilio.rest.api.v2010.account.incoming_phone_number.IncomingPhoneNumberContext
    """
    # No HTTP request is made here; the context is a lazy handle.
    return IncomingPhoneNumberContext(self._version, account_sid=self._solution['account_sid'], sid=sid, )
def network_xml(identifier, xml, address=None):
    """Fills the XML file with the required fields.

      * name
      * uuid
      * bridge
      * ip
        * dhcp

    Returns the serialized XML as a unicode string.
    """
    # Bridge names are length-limited, so truncate the identifier.
    netname = identifier[:8]
    network = etree.fromstring(xml)
    subelement(network, './/name', 'name', identifier)
    subelement(network, './/uuid', 'uuid', identifier)
    # The bridge element carries no text, only a name attribute.
    subelement(network, './/bridge', 'bridge', None, name='virbr-%s' % netname)
    if address is not None:
        set_address(network, address)
    return etree.tostring(network).decode('utf-8')
def _add_or_remove_flag ( self , flag , add ) :
"""Add the given ` flag ` if ` add ` is True , remove it otherwise .""" | meth = self . add_flag if add else self . remove_flag
meth ( flag ) |
def getcol(self, columnname, startrow=0, nrow=-1, rowincr=1):
    """Get the contents of a column or part of it.

    It is returned as a numpy array.
    If the column contains arrays, they should all have the same shape.
    An exception is thrown if they differ in shape. In that case the
    method :func:`getvarcol` should be used instead.
    The column can be sliced by giving a start row (default 0), number of
    rows (default all), and row stride (default 1).
    """
    # Thin wrapper over the C++ binding; all validation happens there.
    return self._getcol(columnname, startrow, nrow, rowincr)
def generate_return_periods(qout_file, return_period_file, num_cpus=multiprocessing.cpu_count(), storm_duration_days=7, method='weibull'):
    """Generate return period from RAPID Qout file.

    Writes a netCDF file with per-reach maximum flow and return-period
    variables (which variables exist depends on ``method``), then computes
    the values in parallel via ``generate_single_return_period``.

    NOTE(review): the default for ``num_cpus`` is evaluated once at import
    time; the grouping of the per-method createVariable blocks below was
    reconstructed from whitespace-flattened source -- confirm against
    upstream before relying on it.
    """
    # get ERA Interim Data Analyzed
    with RAPIDDataset(qout_file) as qout_nc_file:
        print("Setting up Return Periods File ...")
        return_period_nc = Dataset(return_period_file, 'w')
        return_period_nc.createDimension('rivid', qout_nc_file.size_river_id)
        timeSeries_var = return_period_nc.createVariable('rivid', 'i4', ('rivid',))
        timeSeries_var.long_name = ('unique identifier for each river reach')
        max_flow_var = return_period_nc.createVariable('max_flow', 'f8', ('rivid',))
        max_flow_var.long_name = 'maximum streamflow'
        max_flow_var.units = 'm3/s'
        if method == 'weibull':
            return_period_20_var = return_period_nc.createVariable('return_period_20', 'f8', ('rivid',))
            return_period_20_var.long_name = '20 year return period flow'
            return_period_20_var.units = 'm3/s'
        if method == 'gumble':
            return_period_100_var = return_period_nc.createVariable('return_period_100', 'f8', ('rivid',))
            return_period_100_var.long_name = '100 year return period flow'
            return_period_100_var.units = 'm3/s'
            return_period_50_var = return_period_nc.createVariable('return_period_50', 'f8', ('rivid',))
            return_period_50_var.long_name = '50 year return period flow'
            return_period_50_var.units = 'm3/s'
            return_period_20_var = return_period_nc.createVariable('return_period_20', 'f8', ('rivid',))
            return_period_20_var.long_name = '20 year return period flow'
            return_period_20_var.units = 'm3/s'
        if method == 'log_pearson':
            return_period_100_var = return_period_nc.createVariable('return_period_100', 'f8', ('rivid',))
            return_period_100_var.long_name = '100 year return period flow'
            return_period_100_var.units = 'm3/s'
            return_period_50_var = return_period_nc.createVariable('return_period_50', 'f8', ('rivid',))
            return_period_50_var.long_name = '50 year return period flow'
            return_period_50_var.units = 'm3/s'
            return_period_25_var = return_period_nc.createVariable('return_period_25', 'f8', ('rivid',))
            return_period_25_var.long_name = '25 year return period flow'
            return_period_25_var.units = 'm3/s'
        # 10 and 2 year return periods are written for every method.
        return_period_10_var = return_period_nc.createVariable('return_period_10', 'f8', ('rivid',))
        return_period_10_var.long_name = '10 year return period flow'
        return_period_10_var.units = 'm3/s'
        return_period_2_var = return_period_nc.createVariable('return_period_2', 'f8', ('rivid',))
        return_period_2_var.long_name = '2 year return period flow'
        return_period_2_var.units = 'm3/s'
        lat_var = return_period_nc.createVariable('lat', 'f8', ('rivid',), fill_value=-9999.0)
        lon_var = return_period_nc.createVariable('lon', 'f8', ('rivid',), fill_value=-9999.0)
        add_latlon_metadata(lat_var, lon_var)
        return_period_nc.variables['lat'][:] = qout_nc_file.qout_nc.variables['lat'][:]
        return_period_nc.variables['lon'][:] = qout_nc_file.qout_nc.variables['lon'][:]
        river_id_list = qout_nc_file.get_river_id_array()
        return_period_nc.variables['rivid'][:] = river_id_list
        return_period_nc.return_period_method = method
        # Close now so the worker processes can reopen the file for writing.
        return_period_nc.close()
        time_array = qout_nc_file.get_time_array()
    log("Extracting Data and Generating Return Periods ...")
    # Number of whole (Gregorian-average) years covered by the series.
    num_years = int((datetime.utcfromtimestamp(time_array[-1]) - datetime.utcfromtimestamp(time_array[0])).days / 365.2425)
    time_steps_per_day = (24 * 3600) / float((datetime.utcfromtimestamp(time_array[1]) - datetime.utcfromtimestamp(time_array[0])).total_seconds())
    # Independence window for annual maxima, in time steps (at least 1).
    step = max(1, int(time_steps_per_day * storm_duration_days))
    # generate multiprocessing jobs
    # pylint: disable=no-member
    mp_lock = multiprocessing.Manager().Lock()
    job_combinations = []
    partition_index_list = partition(river_id_list, num_cpus * 2)[1]
    for sub_partition_index_list in partition_index_list:
        # pylint: disable=len-as-condition
        if len(sub_partition_index_list) > 0:
            job_combinations.append((qout_file, return_period_file, sub_partition_index_list, step, num_years, method, mp_lock))
    pool = multiprocessing.Pool(num_cpus)
    pool.map(generate_single_return_period, job_combinations)
    pool.close()
    pool.join()
def generate_iucv_authfile(fn, client):
    """Generate the iucv_authorized_userid file."""
    script = ('#!/bin/bash\n'
              'echo -n %s > /etc/iucv_authorized_userid\n' % client)
    with open(fn, 'w') as handle:
        handle.write(script)
def plotPlainImg(sim, cam, rawdata, t, odir):
    """No subplots, just a plain full-frame image per camera.

    http://stackoverflow.com/questions/22408237/named-colors-in-matplotlib
    """
    # One figure per (raw data, camera) pair.
    for R, C in zip(rawdata, cam):
        fg = figure()
        ax = fg.gca()
        ax.set_axis_off()  # no ticks
        # Clamp vmin to at least 1 (log-friendly; avoids a zero floor).
        ax.imshow(R[t, :, :], origin='lower', vmin=max(C.clim[0], 1), vmax=C.clim[1], cmap='gray')
        # Timestamp overlay, truncated to millisecond precision.
        ax.text(0.05, 0.075, datetime.utcfromtimestamp(C.tKeo[t]).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3], ha='left', va='top', transform=ax.transAxes, color='limegreen', size=24)
        writeplots(fg, 'cam{}rawFrame'.format(C.name), t, odir)
def add_partitioning_metadata(portal):
    """Add metadata columns required for partitioning machinery."""
    logger.info("Adding partitioning metadata")
    add_metadata(portal, CATALOG_ANALYSIS_REQUEST_LISTING, 'getRawParentAnalysisRequest')
    add_metadata(portal, CATALOG_ANALYSIS_REQUEST_LISTING, "getDescendantsUIDs")
def add_file_argument(self, *args, mode='r', buffering=1, filetype_options=None, **kwargs):
    """Add a tab-completion safe FileType argument.

    This argument differs from a normal argparse.FileType based argument
    in that the value is a factory function that returns a file handle
    instead of providing an already open file handle. There are various
    reasons why this is a better approach but it is also required to
    avoid erroneous creation of files with shellish tab completion.
    """
    # SafeFileType defers opening, so tab completion never creates files.
    type_ = supplement.SafeFileType(mode=mode, bufsize=buffering, **filetype_options or {})
    return self.add_argument(*args, type=type_, **kwargs)
def scan_ip84(self, region='mainland', page=1):
    """Scan candidate proxies from http://ip84.com

    Args:
        region: Either 'mainland' or 'overseas'.
        page: An integer indicating how many pages to be scanned.
    """
    self.logger.info('start scanning http://ip84.com for proxy list...')
    for i in range(1, page + 1):
        # Pick the listing URL for the requested region.
        if region == 'mainland':
            url = 'http://ip84.com/dlgn/{}'.format(i)
        elif region == 'overseas':
            url = 'http://ip84.com/gwgn/{}'.format(i)
        else:
            url = 'http://ip84.com/gn/{}'.format(i)
        response = requests.get(url)
        soup = BeautifulSoup(response.content, 'lxml')
        table = soup.find('table', class_='list')
        for tr in table.find_all('tr'):
            # Skip the header row (it contains <th> cells).
            if tr.th is not None:
                continue
            info = tr.find_all('td')
            # Columns: 0 = IP, 1 = port, 4 = protocol.
            protocol = info[4].string.lower()
            addr = '{}:{}'.format(info[0].string, info[1].string)
            self.proxy_queue.put({'addr': addr, 'protocol': protocol})
def get_path_fields(self, path, method, view):
    """Return a list of `coreapi.Field` instances corresponding to any
    templated path variables.
    """
    model = getattr(getattr(view, 'queryset', None), 'model', None)
    fields = []
    for variable in uritemplate.variables(path):
        if variable == 'version':
            # URL versioning variables are not documented as path fields.
            continue
        title = ''
        description = ''
        schema_cls = coreschema.String
        kwargs = {}
        if model is not None:
            # Attempt to infer a field description if possible.
            try:
                model_field = model._meta.get_field(variable)
            except Exception:
                # BUGFIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt; Exception still covers
                # FieldDoesNotExist without importing it here.
                model_field = None
            if model_field is not None and model_field.verbose_name:
                title = force_text(model_field.verbose_name)
            if model_field is not None and model_field.help_text:
                description = force_text(model_field.help_text)
            elif model_field is not None and model_field.primary_key:
                description = get_pk_description(model, model_field)
            if hasattr(view, 'lookup_value_regex') and view.lookup_field == variable:
                kwargs['pattern'] = view.lookup_value_regex
            elif isinstance(model_field, models.AutoField):
                schema_cls = coreschema.Integer
        field = Field(name=variable, location='path', required=True, schema=schema_cls(title=title, description=description, **kwargs))
        fields.append(field)
    return fields
def set_noise_filter(self, user_gpio, steady, active):
    """Sets a noise filter on a GPIO.

    Level changes on the GPIO are ignored until a level which has
    been stable for [*steady*] microseconds is detected.  Level
    changes on the GPIO are then reported for [*active*]
    microseconds after which the process repeats.

    user_gpio:= 0-31
    steady:= 0-300000
    active:= 0-100000

    Returns 0 if OK, otherwise PI_BAD_USER_GPIO, or PI_BAD_FILTER.

    This filter affects the GPIO samples returned to callbacks set up
    with [*callback*] and [*wait_for_edge*].  It does not affect levels
    read by [*read*], [*read_bank_1*], or [*read_bank_2*].

    Level changes before and after the active period may be reported.
    Your software must be designed to cope with such reports.

    pi.set_noise_filter(23, 1000, 5000)
    """
    # pigpio message format
    # I p1 user_gpio
    # I p2 steady
    # I p3 4
    # ## extension ##
    # I active
    extents = [struct.pack("I", active)]
    # p3 = 4 is the byte length of the single uint32 extension word.
    res = yield from self._pigpio_aio_command_ext(_PI_CMD_FN, user_gpio, steady, 4, extents)
    return _u2i(res)
def from_json(cls, json):
    """Inherit doc.

    Reconstructs the iterator from its JSON state: resolves the concrete
    key-range iterator class by name, rebuilds key ranges and query spec,
    and restores the in-progress sub-iterator if one was serialized.
    """
    key_range_iter_cls = _KEY_RANGE_ITERATORS[json["key_range_iter_cls"]]
    obj = cls(key_ranges.KeyRangesFactory.from_json(json["key_ranges"]), model.QuerySpec.from_json(json["query_spec"]), key_range_iter_cls)
    current_iter = None
    if json["current_iter"]:
        current_iter = key_range_iter_cls.from_json(json["current_iter"])
    # pylint: disable=protected-access
    obj._current_iter = current_iter
    return obj
def add(self, payload=None):
    """Adds a new document to the data store and returns its id.

    Args:
        payload (dict): Dictionary of initial data that should be stored
            in the new document in the meta section.

    Raises:
        DataStoreNotConnected: If the data store is not connected to the
            server.

    Returns:
        str: The id of the newly created document.
    """
    try:
        db = self._client[self.database]
        col = db[WORKFLOW_DATA_COLLECTION_NAME]
        # Non-dict payloads are silently replaced by an empty meta section.
        return str(col.insert_one({DataStoreDocumentSection.Meta: payload if isinstance(payload, dict) else {}, DataStoreDocumentSection.Data: {}}).inserted_id)
    except ConnectionFailure:
        raise DataStoreNotConnected()
def mk_tmpl(self, path, tmpl, ctx, mode=None):
    """Create a file from a template if it doesn't already exist.

    Renders ``tmpl`` with ``str.format(**ctx)`` into ``path`` and
    optionally chmods the result. An existing file is left untouched.
    """
    path = os.path.abspath(path)
    if os.path.isfile(path):
        logger.warning("File %s already exists, not creating it.", tmpl)
        # BUGFIX: previously fell through and overwrote the existing file,
        # contradicting the docstring and the warning just logged.
        return
    with open(path, 'w') as fd:
        fd.write(tmpl.format(**ctx))
    if mode:
        os.chmod(path, mode)
def SetMaxPowerUpCurrent(self, i):
    """Set the max power up current.

    Raises MonsoonError if ``i`` is outside [0, 8] amps.
    """
    if i < 0 or i > 8:
        raise MonsoonError(("Target max current %sA, is out of acceptable " "range [0, 8].") % i)
    # Map current [0, 8] A onto the inverted 10-bit hardware scale.
    # NOTE(review): on Python 2 an integer `i` makes (i / 8) integer
    # division (0 for any i < 8) -- confirm callers pass a float.
    val = 1023 - int((i / 8) * 1023)
    # Write low byte then high byte to registers 0x08 / 0x09.
    self._SendStruct("BBB", 0x01, 0x08, val & 0xff)
    self._SendStruct("BBB", 0x01, 0x09, val >> 8)
def register_from_options(options=None, template=None, extractor=None):
    """Register the spec codec using the provided options.

    ``template``/``extractor`` default to the noseOfYeti spec-option
    helpers; they are imported lazily so callers may substitute their own.
    """
    if template is None:
        from noseOfYeti.plugins.support.spec_options import spec_options as template
    if extractor is None:
        from noseOfYeti.plugins.support.spec_options import extract_options_dict as extractor
    config = Config(template)
    config.setup(options, extractor)
    # Build the import preamble injected into translated spec files.
    imports = determine_imports(extra_imports=';'.join([d for d in config.extra_import if d]), with_default_imports=config.with_default_imports)
    tok = Tokeniser(default_kls=config.default_kls, import_tokens=imports, wrapped_setup=config.wrapped_setup, with_describe_attrs=not config.no_describe_attrs)
    # Registers the "spec" codec globally for subsequent imports.
    TokeniserCodec(tok).register()
def locate_unlinked(gn, size=100, step=20, threshold=.1, blen=None):
    """Locate variants in approximate linkage equilibrium, where r**2 is
    below the given `threshold`.

    Parameters
    ----------
    gn : array_like, int8, shape (n_variants, n_samples)
        Diploid genotypes at biallelic variants, coded as the number of
        alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt).
    size : int
        Window size (number of variants).
    step : int
        Number of variants to advance to the next window.
    threshold : float
        Maximum value of r**2 to include variants.
    blen : int, optional
        Block length to use for chunked computation.

    Returns
    -------
    loc : ndarray, bool, shape (n_variants,)
        Boolean array where True items locate variants in approximate
        linkage equilibrium.

    Notes
    -----
    The value of r**2 between each pair of variants is calculated using the
    method of Rogers and Huff (2008).
    """
    # check inputs
    if not hasattr(gn, 'shape') or not hasattr(gn, 'dtype'):
        gn = np.asarray(gn, dtype='i1')
    if gn.ndim != 2:
        raise ValueError('gn must have two dimensions')
    # setup output; u1 working dtype required by the cython kernel
    loc = np.ones(gn.shape[0], dtype='u1')
    # compute in chunks to avoid loading big arrays into memory
    blen = get_blen_array(gn, blen)
    blen = max(blen, 10 * size)  # avoid too small chunks
    n_variants = gn.shape[0]
    for i in range(0, n_variants, blen):
        # N.B., ensure overlap with next window
        j = min(n_variants, i + blen + size)
        gnb = np.asarray(gn[i:j], dtype='i1')
        gnb = memoryview_safe(gnb)
        # locb is a view, so the kernel writes results into loc in place
        locb = loc[i:j]
        gn_locate_unlinked_int8(gnb, locb, size, step, threshold)
    return loc.astype('b1')
def strip_praw_subscription(subscription):
    """Parse through a subscription and return a dict with data ready to be
    displayed through the terminal.
    """
    if isinstance(subscription, praw.objects.Multireddit):
        return {
            'object': subscription,
            'type': 'Multireddit',
            'name': subscription.path,
            'title': subscription.description_md,
        }
    return {
        'object': subscription,
        'type': 'Subscription',
        'name': "/r/" + subscription.display_name,
        'title': subscription.title,
    }
def _handleBackspace ( self ) :
"""Handles backspace characters""" | if self . cursorPos > 0 : # print ( ' cp : ' , self . cursorPos , ' was : ' , self . inputBuffer )
self . inputBuffer = self . inputBuffer [ 0 : self . cursorPos - 1 ] + self . inputBuffer [ self . cursorPos : ]
self . cursorPos -= 1
# print ( ' cp : ' , self . cursorPos , ' is : ' , self . inputBuffer )
self . _refreshInputPrompt ( len ( self . inputBuffer ) + 1 ) |
def is_signature_line(line, sender, classifier):
    '''Checks if the line belongs to signature. Returns True or False.'''
    pattern = build_pattern(line, features(sender))
    sample = numpy.array(pattern).reshape(1, -1)
    return classifier.predict(sample) > 0
def validate_record_type(self, cls):
    """Validate given record is acceptable.

    Raises ValueError when ``cls`` is not among ``self.record_types``;
    an empty/falsy ``record_types`` accepts everything.
    """
    if not self.record_types:
        return
    if cls in self.record_types:
        return
    raise ValueError('Unsupported record type "' + cls + '"')
def imageurl(self):
    """Return the URL of a png image of the 2D structure."""
    cached = self._imageurl
    if cached is None:
        cached = 'http://www.chemspider.com/ImagesHandler.ashx?id=%s' % self.csid
        self._imageurl = cached
    return cached
def P_conditional(self, i, li, j, lj, y):
    """Compute the conditional probability that LF i outputs li given that
    LF j output lj and Y = y, normalized over all possible outputs of LF i.

    Parameterized by a class-conditional LF accuracy parameter and a
    symmetric LF correlation parameter (both inside ``self._P``).
    """
    # Normalizer over all k+1 possible labels for LF i (same call order
    # as the original: all labels first, then the requested li).
    probs = [self._P(i, label, j, lj, y) for label in range(self.k + 1)]
    Z = np.sum(probs)
    return self._P(i, li, j, lj, y) / Z
def SetDecodedStreamSize(self, decoded_stream_size):
    """Sets the decoded stream size.

    This function is used to set the decoded stream size if it can be
    determined separately.

    Args:
        decoded_stream_size (int): size of the decoded stream in bytes.

    Raises:
        IOError: if the file-like object is already open.
        OSError: if the file-like object is already open.
        ValueError: if the decoded stream size is invalid.
    """
    if self._is_open:
        raise IOError('Already open.')
    if decoded_stream_size >= 0:
        self._decoded_stream_size = decoded_stream_size
        return
    raise ValueError(
        ('Invalid decoded stream size: {0:d} value out of '
         'bounds.').format(decoded_stream_size))
def key_hash_algo(self, value):
    """A unicode string of the hash algorithm to use when creating the
    certificate identifier - "sha1" (default), or "sha256".
    """
    if value in ('sha1', 'sha256'):
        self._key_hash_algo = value
        return
    raise ValueError(_pretty_message(
        '''
        hash_algo must be one of "sha1", "sha256", not %s
        ''',
        repr(value)
    ))
def system(session):
    """Run the system test suite (nox session)."""
    # Sanity check: Only run system tests if the environment variable is set.
    if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):
        session.skip('Credentials must be set via environment variable.')
    # Use pre-release gRPC for system tests.
    session.install('--pre', 'grpcio')
    # Install all test dependencies, then install this package into the
    # virtualenv's dist-packages.
    session.install('mock', 'pytest')
    for local_dep in LOCAL_DEPS:
        session.install('-e', local_dep)
    # Sibling packages this suite exercises, installed editable.
    systest_deps = ['../bigquery/', '../pubsub/', '../storage/', '../test_utils/', ]
    for systest_dep in systest_deps:
        session.install('-e', systest_dep)
    session.install('-e', '.')
    # Run py.test against the system tests.
    session.run('py.test', '-vvv', '-s', 'tests/system', *session.posargs)
def formdata_post(url, fields):
    """Send an HTTP request with a multipart/form-data body for the
    given URL and return the data returned by the server.
    """
    content_type, data = formdata_encode(fields)
    req = urllib2.Request(url, data)
    req.add_header('Content-Type', content_type)
    # NOTE(review): Python 2 urllib2; the response is read fully into
    # memory and the connection is left to the GC to close.
    return urllib2.urlopen(req).read()
def replace_emoticons(content, excluded_markups):
    """Replace emoticon strings in ``content`` by HTML images.

    :param content: HTML/text content to process
    :param excluded_markups: comma-separated tag names whose text must not
        be rewritten; if falsy, a plain regex replacement is done
    :return: the processed content as a string
    """
    if not excluded_markups:
        # Fast path: no exclusions, replace directly on the raw text.
        return regexp_replace_emoticons(content)
    # '[document]' is BeautifulSoup's root node name; including it excludes
    # text strings that are direct children of the document root.
    excluded_markups = excluded_markups.split(',') + ['[document]']
    soup = BeautifulSoup(content, 'html.parser')
    # list() snapshots the strings because the tree is mutated while iterating.
    for content_string in list(soup.strings):
        if content_string.parent.name not in excluded_markups:
            replaced_content_string = regexp_replace_emoticons(content_string)
            if content_string != replaced_content_string:
                # Parse the replacement so the <img> markup becomes real nodes.
                content_string.replace_with(BeautifulSoup(replaced_content_string, 'html.parser'))
    return str(soup)
def _merge_list_of_dict(first, second, prepend=True):
    '''Merge lists of single-key dictionaries.

    Each element of either list is a dictionary having one single key; that
    key is used as the unique lookup. ``first`` has higher priority than
    ``second``: when the two lists overlap, the position from ``second`` is
    kept but the content comes from ``first``.

    :param first: higher-priority list of single-key dicts
    :param second: lower-priority list of single-key dicts
    :param prepend: whether unmatched ``first`` elements go before (True)
        or after (False) the elements of ``second``
    :return: the merged list
    '''
    first = _cleanup(first)
    second = _cleanup(second)
    if not first and not second:
        return []
    if not first:
        return second
    if not second:
        return first
    # Determine overlaps so we don't change the position of the existing
    # terms/filters coming from ``second``.
    overlaps = []
    merged = []
    appended = []
    for ele in first:
        # next(iter(ele)) extracts the single key; ele.keys()[0] would raise
        # TypeError on Python 3, where keys() returns a non-indexable view.
        if _lookup_element(second, next(iter(ele))):
            overlaps.append(ele)
        elif prepend:
            merged.append(ele)
        else:
            appended.append(ele)
    for ele in second:
        ele_key = next(iter(ele))
        if _lookup_element(overlaps, ele_key):
            # Overlap: keep the position from ``second``, but take the value
            # from ``first``.
            ele_val_first = _lookup_element(first, ele_key)
            merged.append({ele_key: ele_val_first})
        else:
            merged.append(ele)
    if not prepend:
        merged.extend(appended)
    return merged
def install_key(self, key_data):
    """Import an untrusted repository signing key into the GPG keyring."""
    fingerprint = self.key_info['fingerprint']
    primary_uid = self.key_info['uids'][0]
    logger.info("importing repository signing key {0} {1}".format(fingerprint, primary_uid))
    import_result = self.gpg.import_keys(key_data)
    logger.debug("import results: {0}".format(import_result.results))
def iter_search_nodes(self, **conditions):
    """Search nodes lazily, yielding each match as soon as it is found.

    Avoids scanning the full tree topology before returning the first
    matches; useful when dealing with huge trees.
    """
    for node in self.traverse():
        satisfied = all(
            hasattr(node, attr) and getattr(node, attr) == expected
            for attr, expected in conditions.items()
        )
        if satisfied:
            yield node
def predict(self, X):
    """Predict values using the fitted model.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape [n_samples, n_features]

    Returns
    -------
    C : numpy array of shape [n_samples, n_outputs]
        Predicted values.

    Raises
    ------
    ValueError
        If the model has not been fitted.
    """
    regressor = self._genelm_regressor
    if regressor is None:
        raise ValueError("SimpleELMRegressor not fitted")
    return regressor.predict(X)
def extend(self, content, zorder):
    """Extend the content list stored at the given z-order."""
    # setdefault creates the bucket on first use of this z-order.
    self._content.setdefault(zorder, []).extend(content)
def _cache_metrics_metadata(self, instance):
    """Warm the per-instance cache of performance counter metadata.

    Fetches counter name/unit metadata from the server instance and records
    which metric IDs should be collected, keyed by the instance cache key.
    """
    # ## <TEST-INSTRUMENTATION>
    t = Timer()
    # ## </TEST-INSTRUMENTATION>
    i_key = self._instance_key(instance)
    self.metadata_cache.init_instance(i_key)
    self.log.info("Warming metrics metadata cache for instance {}".format(i_key))
    server_instance = self._get_server_instance(instance)
    perfManager = server_instance.content.perfManager
    custom_tags = instance.get('tags', [])
    new_metadata = {}
    metric_ids = []
    # Use old behaviour with metrics to collect defined by our constants
    if self.in_compatibility_mode(instance, log_warning=True):
        for counter in perfManager.perfCounter:
            metric_name = self.format_metric_name(counter, compatibility=True)
            new_metadata[counter.key] = {'name': metric_name, 'unit': counter.unitInfo.key}
            # Build the list of metrics we will want to collect
            if instance.get("all_metrics") or metric_name in BASIC_METRICS:
                # instance="*" requests the counter for every counter instance.
                metric_ids.append(vim.PerformanceManager.MetricId(counterId=counter.key, instance="*"))
    else:
        # Collect every counter available at the configured collection level.
        collection_level = instance.get("collection_level", 1)
        for counter in perfManager.QueryPerfCounterByLevel(collection_level):
            new_metadata[counter.key] = {"name": self.format_metric_name(counter), "unit": counter.unitInfo.key}
            # Build the list of metrics we will want to collect
            metric_ids.append(vim.PerformanceManager.MetricId(counterId=counter.key, instance="*"))
    self.log.info("Finished metadata collection for instance {}".format(i_key))
    # Reset metadata
    self.metadata_cache.set_metadata(i_key, new_metadata)
    self.metadata_cache.set_metric_ids(i_key, metric_ids)
    # Record collection time so the cache can be refreshed on schedule.
    self.cache_config.set_last(CacheConfig.Metadata, i_key, time.time())
    # ## <TEST-INSTRUMENTATION>
    self.histogram('datadog.agent.vsphere.metric_metadata_collection.time', t.total(), tags=custom_tags)
def copyfileobj(src, dst, length=None, exception=OSError):
    """Copy ``length`` bytes from file object ``src`` to file object ``dst``.

    :param src: readable file-like object
    :param dst: writable file-like object
    :param length: number of bytes to copy; ``None`` copies everything
    :param exception: exception type raised on a short read
    :raises exception: if ``src`` ends before ``length`` bytes were read
    """
    if length == 0:
        return
    if length is None:
        # Unbounded copy: delegate to the stdlib implementation.
        shutil.copyfileobj(src, dst)
        return
    # Copy in BUFSIZE chunks, then the remainder, verifying every read so a
    # truncated source raises instead of silently producing short output.
    blocks, remainder = divmod(length, BUFSIZE)
    for _ in range(blocks):
        buf = src.read(BUFSIZE)
        if len(buf) < BUFSIZE:
            raise exception("unexpected end of data")
        dst.write(buf)
    if remainder != 0:
        buf = src.read(remainder)
        if len(buf) < remainder:
            raise exception("unexpected end of data")
        dst.write(buf)
def present(name, auth=None, **kwargs):
    '''Ensure a role exists.

    name
        Name of the role

    description
        An arbitrary description of the role
    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    kwargs = __utils__['args.clean_kwargs'](**kwargs)
    __salt__['keystoneng.setup_clouds'](auth)
    kwargs['name'] = name
    role = __salt__['keystoneng.role_get'](**kwargs)
    if not role:
        if __opts__['test'] is True:
            # Test mode: report the would-be change without creating anything.
            ret['result'] = None
            ret['changes'] = kwargs
            ret['comment'] = 'Role will be created.'
            return ret
        role = __salt__['keystoneng.role_create'](**kwargs)
        ret['changes']['id'] = role.id
        ret['changes']['name'] = role.name
        ret['comment'] = 'Created role'
        return ret
    # NOTE(SamYaple): Update support pending https://review.openstack.org/#/c/496992/
    # Existing role: nothing to change until update support lands.
    return ret
def read_i2c_block_data(self, i2c_addr, register, length, force=None):
    """Read a block of byte data from a given register.

    :param i2c_addr: i2c address
    :type i2c_addr: int
    :param register: Start register
    :type register: int
    :param length: Desired block length (at most I2C_SMBUS_BLOCK_MAX)
    :type length: int
    :param force: passed through to _set_address -- presumably forces use of
        the slave address even when busy; confirm against _set_address
    :type force: Boolean
    :return: List of bytes
    :rtype: list
    :raises ValueError: if length exceeds the SMBus block maximum
    """
    if length > I2C_SMBUS_BLOCK_MAX:
        raise ValueError("Desired block length over %d bytes" % I2C_SMBUS_BLOCK_MAX)
    self._set_address(i2c_addr, force=force)
    msg = i2c_smbus_ioctl_data.create(read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_I2C_BLOCK_DATA)
    # The first byte of the SMBus buffer carries the requested length.
    msg.data.contents.byte = length
    ioctl(self.fd, I2C_SMBUS, msg)
    # Skip the leading length byte and return exactly ``length`` data bytes.
    return msg.data.contents.block[1:length + 1]
def WriteArtifact(self, artifact):
    """Write a new artifact to the database.

    :raises db.DuplicatedArtifactError: if an artifact with the same name
        is already stored
    """
    stored = self.artifacts
    key = str(artifact.name)
    if key in stored:
        raise db.DuplicatedArtifactError(key)
    # Store a copy so later mutations of the caller's object don't leak in.
    stored[key] = artifact.Copy()
def _create_series(ndschan, value, start, end, series_class=TimeSeries):
    """Create a timeseries of constant ``value`` covering [start, end).

    Used to pad a gap in data returned from NDS.

    :param ndschan: NDS2 channel object the gap belongs to
    :param value: fill value for every sample
    :param start: start time (inclusive)
    :param end: end time (exclusive)
    :param series_class: series type to construct, defaults to TimeSeries
    """
    channel = Channel.from_nds2(ndschan)
    # Number of samples needed to span the gap at the channel's sample rate.
    nsamp = int((end - start) * channel.sample_rate.value)
    return series_class(numpy_ones(nsamp) * value, t0=start, sample_rate=channel.sample_rate, unit=channel.unit, channel=channel)
def to_etree(source, root_tag=None):
    """Convert various representations of an XML structure to an etree Element.

    Args:
        source -- The object to convert: ET.Element/ElementTree, dict or string.
    Keyword args:
        root_tag -- optional parent tag in which to wrap the tree if the dict
            representation has no single root. See dict_to_etree().
    Returns:
        An etree Element matching the source object.
    Raises:
        XMLError: if the source cannot be interpreted as XML.
    """
    if hasattr(source, 'get_root'):
        # A wrapper object that already exposes its root element.
        return source.get_root()
    elif isinstance(source, type(ET.Element('x'))):
        # cElementTree.Element isn't exposed directly, so compare against the
        # type of a freshly built element.
        return source
    elif isinstance(source, basestring):
        try:
            return ET.fromstring(source)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed and re-labelled as XMLError.
            raise XMLError(source)
    elif hasattr(source, 'keys'):
        # Dict-like: build the tree from the mapping.
        return dict_to_etree(source, root_tag)
    else:
        raise XMLError(source)
def get_delta_TR(tail_check_max, y_int):
    """Return delta_TR: tail_check_max as a percentage of ``abs(y_int)``.

    :param tail_check_max: tail check maximum value
    :param y_int: y-intercept; only its absolute value is used
    :return: delta_TR percentage, or NaN when tail_check_max is 0 or NaN
    """
    if tail_check_max == 0 or numpy.isnan(tail_check_max):
        return float('nan')
    # Explicit float division replaces past.utils.old_div: these are
    # measurement values, so true division is the intended semantics on
    # both Python 2 and Python 3 (old_div would floor for int inputs).
    delta_TR = (float(tail_check_max) / abs(y_int)) * 100.
    return delta_TR
def update(self, **kwargs):
    """Update this resource with modifications given as keyword arguments.

    Returns self so calls can be chained.
    """
    payload = self._generate_input_dict(**kwargs)
    # PUT against the collection endpoint: drop the trailing id segment of
    # the resource URL and pluralize the resource name.
    collection_url = '/'.join(self.url.split('/')[:-1]) + 's'
    self.load(self.client.put(collection_url, data=payload))
    return self
def state(self, state):
    """Update the status of a build.

    :param state: the new build state; must be one of ``valid_states``
        (case-insensitive)
    :raises ValueError: if the state is not a valid build state
    :return: self, for chaining
    """
    state = state.lower()
    if state not in valid_states:
        # Fixed NameError: the message previously joined the undefined name
        # ``valid_state`` instead of ``valid_states``.
        raise ValueError("Build state must have a value from:\n{}".format(", ".join(valid_states)))
    self.obj['state'] = state
    self.changes.append("Updating build:{}.state={}".format(self.obj['name'], state))
    return self
def canonical_fix_name(fix, avail_fixes):
    """Return the fully qualified module name for a fixer.

    A name already containing ``.fix_`` is returned unchanged. Otherwise a
    leading ``fix_`` prefix is stripped and the full module name is inferred
    from ``avail_fixes``.

    :raises ValueError: if the name matches no fixer, or more than one.
    """
    if ".fix_" in fix:
        # Already fully qualified.
        return fix
    short = fix[4:] if fix.startswith('fix_') else fix
    # Infer the full module name for the fixer, ensuring no names clash
    # (e.g. lib2to3.fixes.fix_blah and libfuturize.fixes.fix_blah).
    matches = [candidate for candidate in avail_fixes
               if candidate.endswith('fix_{0}'.format(short))]
    if len(matches) > 1:
        raise ValueError("Ambiguous fixer name. Choose a fully qualified "
                         "module name instead from these:\n"
                         + "\n".join(" " + myf for myf in matches))
    if not matches:
        raise ValueError("Unknown fixer. Use --list-fixes or -l for a list.")
    return matches[0]
def _maybe_convert_usecols(usecols):
    """Convert `usecols` into a compatible format for parsing in `parsers.py`.

    Parameters
    ----------
    usecols : object
        The use-columns object to potentially convert.

    Returns
    -------
    converted : object
        The compatible format of `usecols`.
    """
    if usecols is None:
        return usecols
    if is_integer(usecols):
        # Deprecated path: a single int meant "columns 0..usecols inclusive".
        warnings.warn(("Passing in an integer for `usecols` has been "
                       "deprecated. Please pass in a list of int from "
                       "0 to `usecols` inclusive instead."),
                      FutureWarning, stacklevel=2)
        return lrange(usecols + 1)
    if isinstance(usecols, str):
        # Column-specification string handled by _range2cols -- presumably
        # Excel-style ranges; confirm against _range2cols.
        return _range2cols(usecols)
    return usecols
def get(self, blueprint, user=None, user_id=None):
    """Load the OAuth token for ``blueprint`` from cache or database.

    Reached via base.py:token() when code checks ``<provider>.authorized``.
    In that path ``user``/``user_id`` are not passed, so the user is resolved
    from the blueprint configuration (``blueprint.config.get('user_id')`` /
    ``...get('user')``) or this storage's own user attributes.

    :param blueprint: the OAuth blueprint whose token is requested
    :param user: optional user object overriding the blueprint's user
    :param user_id: optional user id overriding the blueprint's user id
    :return: the stored token, or None when no matching record exists
    :raises ValueError: if a user is required but none could be resolved
    """
    # check cache
    cache_key = self.make_cache_key(blueprint=blueprint, user=user, user_id=user_id)
    token = self.cache.get(cache_key)
    if token:
        return token
    # if not cached, make database queries
    query = self.session.query(self.model).filter_by(provider=blueprint.name)
    # first() picks the first non-falsy candidate in priority order.
    uid = first([user_id, self.user_id, blueprint.config.get("user_id")])
    u = first(_get_real_user(ref, self.anon_user) for ref in (user, self.user, blueprint.config.get("user")))
    if self.user_required and not u and not uid:
        raise ValueError("Cannot get OAuth token without an associated user")
    # check for user ID
    if hasattr(self.model, "user_id") and uid:
        query = query.filter_by(user_id=uid)
    # check for user (relationship property)
    elif hasattr(self.model, "user") and u:
        query = query.filter_by(user=u)
    # if we have the property, but not value, filter by None
    elif hasattr(self.model, "user_id"):
        query = query.filter_by(user_id=None)
    # run query
    try:
        token = query.one().token
    except NoResultFound:
        token = None
    # cache the result
    self.cache.set(cache_key, token)
    return token
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, delay=15):
    """Download distribute from a specified location and return its filename.

    `version` should be a valid distribute version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download
    attempt (currently unused in this implementation).
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    # Python 3 / Python 2 compatible urlopen import.
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    tgz_name = "distribute-%s.tar.gz" % version
    url = download_base + tgz_name
    saveto = os.path.join(to_dir, tgz_name)
    src = dst = None
    if not os.path.exists(saveto):  # Avoid repeated downloads
        try:
            log.warn("Downloading %s", url)
            src = urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = src.read()
            dst = open(saveto, "wb")
            dst.write(data)
        finally:
            if src:
                src.close()
            if dst:
                dst.close()
    return os.path.realpath(saveto)
def _list_nodes(full=False, for_output=False):
    '''Helper function to format and parse node data.

    Pages through the droplets API (200 per page) and returns a dict keyed
    by droplet name.

    :param full: include the full API output for each node
    :param for_output: passed through to _get_full_output()
    :return: dict mapping node name to its (possibly summarized) data
    '''
    fetch = True
    page = 1
    ret = {}
    while fetch:
        items = query(method='droplets', command='?page=' + six.text_type(page) + '&per_page=200')
        for node in items['droplets']:
            name = node['name']
            # Removed a redundant ``ret[name] = {}`` that was immediately
            # overwritten by both branches below.
            if full:
                ret[name] = _get_full_output(node, for_output=for_output)
            else:
                public_ips, private_ips = _get_ips(node['networks'])
                ret[name] = {
                    'id': node['id'],
                    'image': node['image']['name'],
                    'name': name,
                    'private_ips': private_ips,
                    'public_ips': public_ips,
                    'size': node['size_slug'],
                    'state': six.text_type(node['status']),
                }
        page += 1
        try:
            # Keep fetching while the API reports another page of results.
            fetch = 'next' in items['links']['pages']
        except KeyError:
            fetch = False
    return ret
def sample_rate(self):
    """Return the effective sample rate; 0 means unknown."""
    if self.sbrPresentFlag == 1:
        # Explicit SBR: the extension frequency is the real output rate.
        return self.extensionSamplingFrequency
    if self.sbrPresentFlag == 0:
        return self.samplingFrequency
    # SBR presence unknown from here on.
    # These are all the object types that support SBR.
    aot_can_sbr = (1, 2, 3, 4, 6, 17, 19, 20, 22)
    if self.audioObjectType not in aot_can_sbr:
        return self.samplingFrequency
    # there shouldn't be SBR for > 48KHz
    if self.samplingFrequency > 24000:
        return self.samplingFrequency
    # Could be either samplingFrequency or samplingFrequency * 2 -- unknown.
    return 0
def release_value_set(self):
    """Release a reserved value set so that other executions can use it."""
    remote = self._remotelib
    if not remote:
        # No remote library connection: release locally.
        _PabotLib.release_value_set(self, self._my_id)
    else:
        remote.run_keyword('release_value_set', [self._my_id], {})
def filter_published(self, queryset):
    """Filter the given pages :class:`QuerySet` to only published pages.

    Applies the PUBLISHED status and, depending on settings, the current
    site and the publication start/end date windows.
    """
    if settings.PAGE_USE_SITE_ID:
        queryset = queryset.filter(sites=global_settings.SITE_ID)
    queryset = queryset.filter(status=self.model.PUBLISHED)
    if settings.PAGE_SHOW_START_DATE:
        # Only pages whose publication date has already passed.
        queryset = queryset.filter(publication_date__lte=get_now())
    if settings.PAGE_SHOW_END_DATE:
        # Pages with no end date, or an end date still in the future.
        queryset = queryset.filter(Q(publication_end_date__gt=get_now()) | Q(publication_end_date__isnull=True))
    return queryset
def load_obj(fn):
    """Load a 3d mesh from an '.obj' file.

    Args:
        fn: Input file name or file-like object.
    Returns:
        dictionary with the following keys (some of which may be missing):
            position: np.float32, (n, 3) array, vertex positions
            uv: np.float32, (n, 2) array, vertex uv coordinates
            normal: np.float32, (n, 3) array, vertex normals
            face: np.int32, (k*3,) triangular face indices
    """
    # Index 0 is a dummy entry so that .obj's 1-based indices (and the 0
    # used for a missing component) map directly into these lists.
    position = [np.zeros(3, dtype=np.float32)]
    normal = [np.zeros(3, dtype=np.float32)]
    uv = [np.zeros(2, dtype=np.float32)]
    # Maps a (pos_idx, uv_idx, normal_idx) tuple to its output vertex index.
    tuple2idx = OrderedDict()
    trinagle_indices = []
    input_file = open(fn) if isinstance(fn, str) else fn
    for line in input_file:
        line = line.strip()
        if not line or line[0] == '#':
            continue
        line = line.split(' ', 1)
        tag = line[0]
        if len(line) > 1:
            line = line[1]
        else:
            line = ''
        # NOTE(review): np.fromstring is deprecated in newer NumPy in favor
        # of manual parsing / np.frombuffer -- confirm target NumPy version.
        if tag == 'v':
            position.append(np.fromstring(line, sep=' '))
        elif tag == 'vt':
            uv.append(np.fromstring(line, sep=' '))
        elif tag == 'vn':
            normal.append(np.fromstring(line, sep=' '))
        elif tag == 'f':
            output_face_indices = []
            for chunk in line.split():
                # tuple order: pos_idx, uv_idx, normal_idx
                vt = _parse_vertex_tuple(chunk)
                if vt not in tuple2idx:  # create a new output vertex?
                    tuple2idx[vt] = len(tuple2idx)
                output_face_indices.append(tuple2idx[vt])
            # generate face triangles (fan triangulation of the polygon)
            for i in range(1, len(output_face_indices) - 1):
                for vi in [0, i, i + 1]:
                    trinagle_indices.append(output_face_indices[vi])
    outputs = {}
    outputs['face'] = np.int32(trinagle_indices)
    pos_idx, uv_idx, normal_idx = np.int32(list(tuple2idx)).T
    # Emit each attribute only if at least one face referenced a non-zero
    # (i.e. real) index for it.
    if np.any(pos_idx):
        outputs['position'] = _unify_rows(position)[pos_idx]
    if np.any(uv_idx):
        outputs['uv'] = _unify_rows(uv)[uv_idx]
    if np.any(normal_idx):
        outputs['normal'] = _unify_rows(normal)[normal_idx]
    return outputs
def get_cpu_info(self):
    """Retrieve CPU info from the connected client.

    :return: a filled snap7.snap7types.S7CpuInfo structure
    """
    info = snap7.snap7types.S7CpuInfo()
    # The C library fills ``info`` in place; check_error raises on failure.
    result = self.library.Cli_GetCpuInfo(self.pointer, byref(info))
    check_error(result, context="client")
    return info
def autozoom(self, points):
    '''Fit the current view to the correct zoom level to display all *points*.

    The camera viewing direction and rotation pivot match the geometric
    center of the points and the distance from that point is calculated in
    order for all points to be in the field of view. This is currently used
    to provide optimal visualization for molecules and systems.

    **Parameters**

    points: np.ndarray((N, 3))
       Array of points.
    '''
    points = np.asarray(points)
    # Small safety margin so the outermost points don't touch the viewport edge.
    extraoff = 0.01
    # Project points on the plane defined by camera up and right vector.
    # This is achieved by using dot product on camera a and b vectors.
    # NOTE(review): ``abc`` is built but never used below.
    abc = np.array([self.a, self.b, self.c])
    old_geom_center = points.sum(axis=0) / len(points)
    # Translate points.
    # NOTE(review): points are shifted by +self.position here -- looks
    # deliberate but worth confirming against the camera coordinate model.
    points = points.copy() + self.position
    # Translate position to geometric_center along directions a and b
    geom_center = points.sum(axis=0) / len(points)
    self.position += self.a * np.dot(geom_center, self.a)
    self.position += self.b * np.dot(geom_center, self.b)
    # Translate pivot to the geometric center
    self.pivot = old_geom_center
    # Get the bounding sphere radius by searching for the most distant point
    bound_radius = np.sqrt(((points - geom_center) * (points - geom_center)).sum(axis=1).max())
    # Calculate the distance in order to have the most distant point in our
    # field of view (top/bottom)
    fov_topbottom = self.fov * np.pi / 180.0
    dist = (bound_radius + self.z_near) / np.tan(fov_topbottom * 0.5)
    # Set the c-component of the position at the calculated distance:
    # 1) translate the position on the pivot
    self.position = self.pivot.copy()
    # 2) add the distance plus a little extra room
    self.position -= self.c * (dist * (1 + extraoff))
def request_callback_answer(self, chat_id: Union[int, str], message_id: int, callback_data: bytes):
    """Request a callback answer from bots.

    This is the equivalent of clicking an inline button containing callback
    data.

    Args:
        chat_id (``int`` | ``str``):
            Unique identifier (int) or username (str) of the target chat.
            For your personal cloud (Saved Messages) you can simply use
            "me" or "self". For a contact that exists in your Telegram
            address book you can use his phone number (str).
        message_id (``int``):
            The message id the inline keyboard is attached on.
        callback_data (``bytes``):
            Callback data associated with the inline button you want to get
            the answer from.

    Returns:
        The answer containing info useful for clients to display a
        notification at the top of the chat screen or as an alert.

    Raises:
        :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
        ``TimeoutError`` if the bot fails to answer within 10 seconds.
    """
    # retries=0: a bot that never answers should fail fast after the
    # 10-second timeout instead of being retried.
    return self.send(functions.messages.GetBotCallbackAnswer(peer=self.resolve_peer(chat_id), msg_id=message_id, data=callback_data), retries=0, timeout=10)
def foldx_dir(self):
    """str: FoldX folder path, or None if the root directory is unset."""
    if not self.root_dir:
        log.warning('Root directory not set')
        return None
    return op.join(self.root_dir, self._foldx_dirname)
def change_host_modattr(self, host, value):
    """Change host modified attributes.

    Format of the line that triggers function call::

        CHANGE_HOST_MODATTR;<host_name>;<value>

    For boolean attributes, toggles the host attribute state
    (enable/disable). For non boolean attributes, only indicates that the
    corresponding attribute is to be saved in the retention.

    ``value`` is a bit mask of MODATTR_* constants
    (MODATTR_NONE=0, MODATTR_NOTIFICATIONS_ENABLED=1,
    MODATTR_ACTIVE_CHECKS_ENABLED=2, ..., MODATTR_NOTIFICATION_TIMEPERIOD=65536).

    :param host: host to edit
    :type host: alignak.objects.host.Host
    :param value: new value to set (bit mask as a string)
    :type value: str
    :return: None
    """
    # todo: deprecate this
    # We need to change each of the needed attributes.
    previous_value = host.modified_attributes
    changes = int(value)
    # For all boolean and non boolean attributes.
    # Fixed: MODATTR_FRESHNESS_CHECKS_ENABLED used to appear twice in this
    # list, so its two toggles cancelled each other out.
    for modattr in ["MODATTR_NOTIFICATIONS_ENABLED", "MODATTR_ACTIVE_CHECKS_ENABLED",
                    "MODATTR_PASSIVE_CHECKS_ENABLED", "MODATTR_EVENT_HANDLER_ENABLED",
                    "MODATTR_FLAP_DETECTION_ENABLED", "MODATTR_PERFORMANCE_DATA_ENABLED",
                    "MODATTR_FRESHNESS_CHECKS_ENABLED", "MODATTR_EVENT_HANDLER_COMMAND",
                    "MODATTR_CHECK_COMMAND", "MODATTR_NORMAL_CHECK_INTERVAL",
                    "MODATTR_RETRY_CHECK_INTERVAL", "MODATTR_MAX_CHECK_ATTEMPTS",
                    "MODATTR_CHECK_TIMEPERIOD", "MODATTR_CUSTOM_VARIABLE",
                    "MODATTR_NOTIFICATION_TIMEPERIOD"]:
        if changes & DICT_MODATTR[modattr].value:
            # Toggle the concerned host attribute
            setattr(host, DICT_MODATTR[modattr].attribute, not getattr(host, DICT_MODATTR[modattr].attribute))
    host.modified_attributes = previous_value ^ changes
    # And we need to push the information to the scheduler.
    self.send_an_element(host.get_update_status_brok())
def create(index_name, body, force, verbose):
    """Create a new index."""
    # With --force, tell Elasticsearch to ignore "already exists" (HTTP 400).
    ignore_codes = [400] if force else None
    result = current_search_client.indices.create(
        index=index_name,
        body=json.load(body),
        ignore=ignore_codes,
    )
    if verbose:
        click.echo(json.dumps(result))
def remove_domain_user_role(request, user, role, domain=None):
    """Remove a given single role for a user from a domain.

    :param request: the current request, used to build the keystone client
    :param user: user to revoke the role from
    :param role: role to revoke
    :param domain: domain scope of the revocation
    """
    manager = keystoneclient(request, admin=True).roles
    return manager.revoke(role, user=user, domain=domain)
def get_set(self, flag, new):
    """Return the boolean value of ``flag``; optionally update it.

    When ``new`` is the True or False singleton the flag is updated, and the
    value from before the update is returned. Any other value (e.g. None)
    leaves the flag untouched.
    """
    previous = self._is_set(flag)
    # Identity checks are deliberate: only the exact True/False singletons
    # trigger an update.
    if new is True:
        self._set(flag)
    elif new is False:
        self._clear(flag)
    return previous
def ipostorder(self):
    '''Iterate tree nodes depth-first in post-order.'''
    stack = [self]
    visited = set()
    while stack:
        node = stack[-1]
        if node in visited:
            # Children already expanded: emit the node on the way back up.
            stack.pop()
            yield node
        else:
            visited.add(node)
            # Reversed so children are processed left-to-right.
            stack.extend(reversed(node.children))
def _create_server ( host , port ) :
"""Helper function . Creates a listening socket on the designated
host and port . Modeled on the socket . create _ connection ( )
function .""" | exc = socket . error ( "getaddrinfo returns an empty list" )
for res in socket . getaddrinfo ( host , port , 0 , socket . SOCK_STREAM ) :
af , socktype , proto , canonname , sa = res
sock = None
try : # Create the listening socket
sock = socket . socket ( af , socktype , proto )
sock . setsockopt ( socket . SOL_SOCKET , socket . SO_REUSEADDR , 1 )
sock . bind ( sa )
sock . listen ( 1024 )
return sock
except socket . error as exc : # Clean up after ourselves
if sock is not None :
sock . close ( )
# Couldn ' t create a listening socket
raise exc |
def model_changed(self, model, prop_name, info):
    """Forward a state-element change notification to the parent state."""
    parent = self.parent
    if parent is None:
        return
    parent.model_changed(model, prop_name, info)
def get_object(brain_or_object):
    """Get the full content object.

    :param brain_or_object: A single catalog brain or content object
    :type brain_or_object: PortalObject/ATContentType/DexterityContentType
        /CatalogBrain
    :returns: The full object
    """
    if not is_object(brain_or_object):
        fail("{} is not supported.".format(repr(brain_or_object)))
    if is_brain(brain_or_object):
        # Brains are lightweight catalog records; wake up the real object.
        return brain_or_object.getObject()
    return brain_or_object
def addChild(self, item):
    """Add a child node, registering self as one of its parents.

    A non-Node item is wrapped in a new Node first; the same node is never
    added as a child twice. The (possibly wrapped) child node is returned.
    """
    child = item if isinstance(item, Node) else Node(item)
    if child in self.children:
        return child
    self.children.append(child)
    child.parents.add(self)
    return child
def location(args):
    """%prog location bedfile fastafile

    Given SNP locations, summarize the locations in the sequences. For
    example, find out if there are more 3`-SNPs than 5`-SNPs.
    """
    from jcvi.formats.bed import BedLine
    from jcvi.graphics.histogram import stem_leaf_plot
    p = OptionParser(location.__doc__)
    p.add_option("--dist", default=100, type="int", help="Distance cutoff to call 5` and 3` [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    bedfile, fastafile = args
    dist = opts.dist
    sizes = Sizes(fastafile).mapping
    fp = open(bedfile)
    fiveprime = threeprime = total = 0
    percentages = []
    for row in fp:
        b = BedLine(row)
        pos = b.start
        size = sizes[b.seqid]
        # Within ``dist`` bp of the start: five-prime; of the end: three-prime.
        if pos < dist:
            fiveprime += 1
        if size - pos < dist:
            threeprime += 1
        total += 1
        # NOTE(review): integer division on Python 2 unless true division is
        # imported at module level -- confirm intended precision.
        percentages.append(100 * pos / size)
    m = "Five prime (within {0}bp of start codon): {1}\n".format(dist, fiveprime)
    m += "Three prime (within {0}bp of stop codon): {1}\n".format(dist, threeprime)
    m += "Total: {0}".format(total)
    print(m, file=sys.stderr)
    bins = 10
    title = "Locations within the gene [0=Five-prime, 100=Three-prime]"
    stem_leaf_plot(percentages, 0, 100, bins, title=title)
def match_hail_size_step_distributions(self, model_tracks, obs_tracks, track_pairings):
    """Attach observed hail-size distribution statistics to model tracks.

    For every matched model-track timestep, records the maximum hail size,
    the number of matched observed steps and fitted gamma distribution
    parameters (shape, location, scale) in ``model_track.observations``.

    Args:
        model_tracks: list of model track objects
        obs_tracks: list of observed track objects
        track_pairings: frame with per-step "Matched" flags and "Pairings"
            sequences of (obs_track_index, obs_timestep_index) tuples
    Returns:
        None; results are stored on each model track.
    """
    label_columns = ["Matched", "Max_Hail_Size", "Num_Matches", "Shape", "Location", "Scale"]
    # ``s`` walks track_pairings rows in lockstep with (track, time) pairs.
    s = 0
    for m, model_track in enumerate(model_tracks):
        model_track.observations = pd.DataFrame(index=model_track.times, columns=label_columns, dtype=np.float64)
        model_track.observations.loc[:, :] = 0
        model_track.observations["Matched"] = model_track.observations["Matched"].astype(np.int32)
        for t, time in enumerate(model_track.times):
            model_track.observations.loc[time, "Matched"] = track_pairings.loc[s, "Matched"]
            if model_track.observations.loc[time, "Matched"] > 0:
                all_hail_sizes = []
                step_pairs = track_pairings.loc[s, "Pairings"]
                for step_pair in step_pairs:
                    obs_step = obs_tracks[step_pair[0]].timesteps[step_pair[1]].ravel()
                    obs_mask = obs_tracks[step_pair[0]].masks[step_pair[1]].ravel()
                    # Keep only in-mask pixels at or above the detection threshold.
                    all_hail_sizes.append(obs_step[(obs_mask == 1) & (obs_step >= self.mrms_ew.min_thresh)])
                combined_hail_sizes = np.concatenate(all_hail_sizes)
                # Fix the gamma location just below the smallest observation
                # so the fit only estimates shape and scale.
                min_hail = combined_hail_sizes.min() - 0.1
                model_track.observations.loc[time, "Max_Hail_Size"] = combined_hail_sizes.max()
                model_track.observations.loc[time, "Num_Matches"] = step_pairs.shape[0]
                model_track.observations.loc[time, ["Shape", "Location", "Scale"]] = gamma.fit(combined_hail_sizes, floc=min_hail)
            s += 1
def start_serving(self, delegate: httputil.HTTPServerConnectionDelegate) -> None:
    """Start serving HTTP requests on this connection.

    :arg delegate: a `.HTTPServerConnectionDelegate` that produces the
        request handlers for this connection.
    """
    assert isinstance(delegate, httputil.HTTPServerConnectionDelegate)
    serving_future = gen.convert_yielded(self._server_request_loop(delegate))
    self._serving_future = serving_future
    # Register the future on the IOLoop so its errors get logged.
    self.stream.io_loop.add_future(serving_future, lambda f: f.result())
def cudnnGetFilter4dDescriptor(wDesc):
    """Get parameters of a 4D filter descriptor.

    Queries the parameters of a previously initialized filter descriptor
    object.

    Parameters
    ----------
    wDesc : cudnnFilterDescriptor
        Handle to a previously created filter descriptor.

    Returns
    -------
    dataType : cudnnDataType
        Data type.
    format : cudnnTensorFormat
        Tensor format.
    k : int
        Number of output feature maps.
    c : int
        Number of input feature maps.
    h : int
        Height of each filter.
    w : int
        Width of each filter.
    """
    dataType = ctypes.c_int()
    # renamed from `format` so the builtin is not shadowed
    tensor_format = ctypes.c_int()
    k = ctypes.c_int()
    c = ctypes.c_int()
    h = ctypes.c_int()
    w = ctypes.c_int()
    status = _libcudnn.cudnnGetFilter4dDescriptor(
        wDesc, ctypes.byref(dataType), ctypes.byref(tensor_format),
        ctypes.byref(k), ctypes.byref(c), ctypes.byref(h), ctypes.byref(w))
    cudnnCheckStatus(status)
    return (dataType.value, tensor_format.value, k.value, c.value,
            h.value, w.value)
def quickinfo(self):
    """Return a short string describing some of the options of the actor.

    :return: the info, None if not available
    :rtype: str
    """
    search_info = base.to_commandline(self.config["search"])
    eval_info = base.to_commandline(self.config["eval"])
    return "search: " + search_info + ", eval: " + eval_info
def where(self, where: str) -> 'SASdata':
    """Return a clone of this SASdata object with the where attribute set.

    The original SASdata object is not affected.

    :param where: the where clause to apply
    :return: SAS data object
    """
    clone = SASdata(self.sas, self.libref, self.table, dsopts=dict(self.dsopts))
    clone.HTML = self.HTML
    clone.dsopts['where'] = where
    return clone
def get_range(element, ranges, dimension):
    """Compute the data, soft- and hard-range along a dimension.

    Given an element and a dictionary of ranges, looks up precomputed
    ranges by dimension name when available, otherwise queries the element
    and the dimension object directly.

    Parameters
    ----------
    element : object
        Object exposing ``range(dimension, dimension_range=False)``.
    ranges : dict or None
        Maps dimension name to a dict with 'data', 'soft' and 'hard' keys.
    dimension : object or None
        Object with ``name``, ``soft_range`` and ``range`` attributes;
        a falsy value or the string 'categorical' yields NaN ranges.

    Returns
    -------
    tuple
        (data_range, soft_range, hard_range); each a (low, high) pair,
        or (nan, nan) triples when no dimension applies.
    """
    if dimension and dimension != 'categorical':
        if ranges and dimension.name in ranges:
            drange = ranges[dimension.name]['data']
            srange = ranges[dimension.name]['soft']
            hrange = ranges[dimension.name]['hard']
        else:
            drange = element.range(dimension, dimension_range=False)
            srange = dimension.soft_range
            hrange = dimension.range
    else:
        # np.nan: the np.NaN alias was removed in NumPy 2.0
        drange = srange = hrange = (np.nan, np.nan)
    return drange, srange, hrange
def count_missing(self, data, output="number"):
    """Count missing values in a dataframe.

    Parameters
    ----------
    data : pd.DataFrame()
        Input dataframe.
    output : str
        String indicating the output of the function (number or percent).

    Returns
    -------
    int/float
        Count of missing data (int or float).
    """
    missing_total = self._find_missing(data, return_bool=False).sum()
    if output == "percent":
        return (missing_total / data.shape[0]) * 100
    if output == "number":
        return missing_total
def configuration(self, plugin):
    """Get plugin configuration.

    Return a tuple of (on|off|default, args)
    """
    fields = self.config.get(plugin, "default;").split(';')
    if len(fields) == 1:
        fields = fields + ['']
    return tuple(fields)
def api_delete(service, file_id, owner_token):
    """Delete a file already uploaded to Send"""
    url = service + 'api/delete/%s' % file_id
    payload = {'owner_token': owner_token, 'delete_token': owner_token}
    response = requests.post(url, json=payload)
    response.raise_for_status()
    return response.text == 'OK'
def check_yaafe():
    """Check Yaafe availability.

    Returns True if the ``yaafelib`` package can be imported, otherwise
    emits an ImportWarning and returns False.
    """
    # NOTE(review): _WITH_YAAFE is assigned locally here; if a module-level
    # flag of the same name is meant to be updated, a `global` declaration
    # is missing -- verify against module usage.
    try:
        import yaafelib
    except ImportError:
        # fixed typo 'librairy' in the user-visible warning message
        warnings.warn('Yaafe library is not available', ImportWarning,
                      stacklevel=2)
        _WITH_YAAFE = False
    else:
        _WITH_YAAFE = True
        del yaafelib
    return _WITH_YAAFE
def _to_rest_rels(model, props):
    """Move the relationships to the appropriate location in the props.

    All to_ones are gathered under a 'to_one' key while all to_manys
    are gathered under a 'to_many' key. Modifies ``props`` in place.
    """
    props['to_many'] = {}
    props['to_one'] = {}
    for field in model.to_one:
        if field in props:
            props['to_one'][field] = props.pop(field)
    for field in model.to_many:
        if field in props:
            props['to_many'][field] = props.pop(field)
def get_plugin_info(self, plugin_name):
    """Get plugin information"""
    if not plugin_name:
        return None
    for plugin in self.get_all_plugins:
        if plugin["plugin_name"] == plugin_name:
            return plugin
    return None
def BuildApprovalUrn(self, approval_id):
    """Builds approval object urn."""
    # Publish the approval request as an audit event before building the URN.
    audit_event = rdf_events.AuditEvent(
        user=self.token.username,
        action="CLIENT_APPROVAL_REQUEST",
        client=self.subject_urn,
        description=self.reason)
    events.Events.PublishEvent("Audit", audit_event, token=self.token)
    return self.ApprovalUrnBuilder(self.subject_urn.Path(),
                                   self.token.username, approval_id)
def __deserialize(self, data, klass):
    """Deserializes dict, list, str into an object.

    :param data: dict, list or str.
    :param klass: class literal, or string of class name.
    :return: object.
    """
    if data is None:
        return None
    if isinstance(klass, str):
        if klass.startswith('list['):
            # raw string avoids the invalid-escape SyntaxWarning on Python 3.12+
            sub_kls = re.match(r'list\[(.*)\]', klass).group(1)
            return [self.__deserialize(sub_data, sub_kls) for sub_data in data]
        if klass.startswith('dict('):
            sub_kls = re.match(r'dict\(([^,]*), (.*)\)', klass).group(2)
            return {k: self.__deserialize(v, sub_kls) for k, v in iteritems(data)}
        # convert str to class
        # SECURITY NOTE: eval on a non-literal string -- acceptable only
        # because klass names originate from the generated API spec,
        # never from untrusted input.
        # for native types
        if klass in ['int', 'float', 'str', 'bool', "date", 'datetime', "object"]:
            klass = eval(klass)
        # for model types
        else:
            klass = eval('models.' + klass)
    if klass in [int, float, str, bool]:
        return self.__deserialize_primitive(data, klass)
    elif klass == object:
        return self.__deserialize_object(data)
    elif klass == date:
        return self.__deserialize_date(data)
    elif klass == datetime:
        return self.__deserialize_datatime(data)
    else:
        return self.__deserialize_model(data, klass)
def clean(self):
    """Clean response."""
    if self.response.type != 'application/json':
        # Non-JSON payloads are wrapped as-is.
        self.response = FileResponse(self.response.response)
        return
    cleaned = copy.deepcopy(self.response.data)
    if self.cleaner is not None:
        cleaned = self.cleaner(cleaned)
    # Pick a typed response wrapper based on the cleaned payload's type.
    response_cls = {dict: DictResponse, int: IntResponse}.get(type(cleaned), BaseResponse)
    self.response = response_cls.from_cleaned(self.response, cleaned)
def merge_options(cls, groups, options=None, **kwargs):
    """Given a full options dictionary and options groups specified
    as keywords, return the full set of merged options:

    >>> options = {'Curve': {'style': dict(color='b')}}
    >>> style = {'Curve': {'linewidth': 10}}
    >>> merged = StoreOptions.merge_options(['style'], options, style=style)
    >>> sorted(merged['Curve']['style'].items())
    [('color', 'b'), ('linewidth', 10)]
    """
    group_set = set(groups)
    if options is not None:
        if set(options.keys()) <= group_set:
            # options is itself keyed by group names; treat it as kwargs
            kwargs, options = options, None
        elif any(key in group_set for key in options):
            raise Exception("All keys must be a subset of %s" % ', '.join(group_set))
    merged = {} if options is None else dict(**options)
    spec_keys = {key for group_dict in kwargs.values() for key in group_dict}
    for spec_key in spec_keys:
        # Collect, per group, the options declared for this spec key.
        additions = {group: group_dict[spec_key]
                     for group, group_dict in kwargs.items()
                     if spec_key in group_dict}
        entry = merged.setdefault(spec_key, {})
        for group, group_opts in additions.items():
            if group in entry:
                entry[group].update(group_opts)
            else:
                entry[group] = group_opts
    return merged
def _load_multilinestring(tokens, string):
    """Has similar inputs and return value to :func:`_load_point`, except is
    for handling MULTILINESTRING geometry.

    :returns:
        A GeoJSON `dict` MultiLineString representation of the WKT ``string``.
    """
    if next(tokens) != '(':
        raise ValueError(INVALID_WKT_FMT % string)
    coordinates = []
    while True:
        try:
            linestring = _load_linestring(tokens, string)
            coordinates.append(linestring['coordinates'])
            # A ')' after a linestring closes the multilinestring;
            # anything else (',') continues to the next one.
            if next(tokens) == ')':
                break
        except StopIteration:
            # Token stream ended prematurely; the WKT is not valid.
            raise ValueError(INVALID_WKT_FMT % string)
    return dict(type='MultiLineString', coordinates=coordinates)
def dusk(self, date=None, local=True, use_elevation=True):
    """Calculates the dusk time (the time in the evening when the sun is a
    certain number of degrees below the horizon. By default this is 6
    degrees but can be changed by setting the
    :attr:`solar_depression` property.)

    :param date: The date for which to calculate the dusk time.
        If no date is specified then the current date will be used.
    :type date: :class:`~datetime.date`
    :param local: True = Time to be returned in location's time zone;
        False = Time to be returned in UTC.
        If not specified then the time will be returned in local time
    :type local: bool
    :param use_elevation: True = Return times that allow for the location's elevation;
        False = Return times that don't use elevation.
        If not specified then times will take elevation into account.
    :type use_elevation: bool

    :returns: The date and time at which dusk occurs.
    :rtype: :class:`~datetime.datetime`
    """
    if local and self.timezone is None:
        raise ValueError("Local time requested but Location has no timezone set.")
    if self.astral is None:
        self.astral = Astral()
    if date is None:
        date = datetime.date.today()
    observer_elevation = self.elevation if use_elevation else 0
    dusk_time = self.astral.dusk_utc(date, self.latitude, self.longitude,
                                     observer_elevation=observer_elevation)
    return dusk_time.astimezone(self.tz) if local else dusk_time
def get_db_instances(self):
    '''DB instance.

    Connects to AWS RDS and returns the list of DB instances, or False
    if the connection or the describe call fails.
    '''
    if not self.connect_to_aws_rds():
        return False
    try:
        instances = self.rdsc.describe_db_instances().get('DBInstances')
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.
        return False
    else:
        return instances
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.