signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def check_access_token(self, request_token):
    """Validate an access token.

    The token must consist solely of characters from
    ``self.safe_characters`` and its length must fall within the
    inclusive bounds given by ``self.access_token_length``.
    """
    lower, upper = self.access_token_length
    chars_ok = set(request_token) <= self.safe_characters
    length_ok = lower <= len(request_token) <= upper
    return chars_ok and length_ok
def evaluate_impl(expression, params=None):
    """Evaluate a calculator expression tree.

    Shared implementation of ``CalculatorImpl::evaluate()`` and
    ``FunctionImpl::call()``.  In the latter case, ``params`` are the
    parameter values passed to the function; in the former case,
    ``params`` is just an empty list.
    """
    kind = expression.which()
    if kind == 'literal':
        return capnp.Promise(expression.literal)
    if kind == 'previousResult':
        return read_value(expression.previousResult)
    if kind == 'parameter':
        assert expression.parameter < len(params)
        return capnp.Promise(params[expression.parameter])
    if kind == 'call':
        call = expression.call
        func = call.function
        # Evaluate every argument expression, then invoke the function
        # once all of them have resolved.
        arg_promises = [evaluate_impl(arg, params) for arg in call.params]
        return (capnp.join_promises(arg_promises)
                .then(lambda vals: func.call(vals))
                .then(lambda result: result.value))
    raise ValueError("Unknown expression type: " + kind)
def copy_to(self, dest, buffering: int = -1):
    """Copy this file to ``dest``.

    ``dest`` may be a ``str``, ``FileInfo`` or ``DirectoryInfo``.  When a
    ``DirectoryInfo`` is given, the file is copied into that directory
    under its current name.
    """
    if isinstance(dest, str):
        dest_path = dest
    elif isinstance(dest, FileInfo):
        dest_path = dest.path
    elif isinstance(dest, DirectoryInfo):
        # Copy into the directory, keeping the original file name.
        dest_path = dest.path / self.path.name
    else:
        raise TypeError('dest is not one of `str`, `FileInfo`, `DirectoryInfo`')
    # 'x' mode guarantees the destination does not already exist.
    with open(self._path, 'rb', buffering=buffering) as source, \
            open(dest_path, 'xb') as target:
        for chunk in source:
            target.write(chunk)
def CheckProg(context, prog_name):
    """Check whether ``prog_name`` exists on the path.

    Returns the path to the application, or ``None`` when it cannot be
    found.
    """
    result = SCons.Conftest.CheckProg(context, prog_name)
    context.did_show_result = 1
    return result
def lock_file(f, block=False):
    """Acquire an exclusive ``flock`` on the open file ``f``.

    With ``block=False`` (the default), die hard and fast if another
    process already holds the lock for this file.  With ``block=True``,
    wait for the lock to be released, then continue.
    """
    flags = fcntl.LOCK_EX if block else fcntl.LOCK_EX | fcntl.LOCK_NB
    try:
        fcntl.flock(f.fileno(), flags)
    except IOError as e:
        if e.errno in (errno.EACCES, errno.EAGAIN):
            raise SystemExit("ERROR: %s is locked by another process." % f.name)
        raise
def iter_breadth_first(self, start=None, do_paths=False, do_duplicates=False):
    """Iterate over the vertices in breadth-first order.

    See http://en.wikipedia.org/wiki/Breadth-first_search for more info.
    If no start vertex is given, the central vertex is taken.  Each
    yielded tuple holds the vertex and its distance to the start; when
    ``do_paths`` is set, the path from the start (a tuple of vertices)
    is appended.  With ``do_duplicates``, vertices reachable through
    several paths of equal length are yielded once per path, which
    typically only makes sense when ``do_paths`` is also set.
    """
    from collections import deque
    if start is None:
        start = self.central_vertex
    else:
        try:
            start = int(start)
        except ValueError:
            raise TypeError("First argument (start) must be an integer.")
        if start < 0 or start >= self.num_vertices:
            raise ValueError("start must be in the range [0, %i[" % self.num_vertices)
    # distances[v]: BFS depth at which v was first reached (-1 = unseen).
    distances = np.zeros(self.num_vertices, int)
    distances[:] = -1
    distances[start] = 0
    if do_paths:
        item = (start, 0, (start,))
    else:
        item = (start, 0)
    yield item
    queue = deque([item])
    while queue:
        if do_paths:
            vertex, length, path = queue.popleft()
        else:
            vertex, length = queue.popleft()
        next_length = length + 1
        for neighbor in self.neighbors[vertex]:
            seen = distances[neighbor]
            if seen == -1 or (do_duplicates and seen == next_length):
                distances[neighbor] = next_length
                if do_paths:
                    item = (neighbor, next_length, path + (neighbor,))
                else:
                    item = (neighbor, next_length)
                yield item
                queue.append(item)
def tt(self, year=None, month=1, day=1, hour=0, minute=0, second=0.0, jd=None):
    """Build a `Time` from a Terrestrial Time (TT) calendar date.

    The date is interpreted as a proleptic Gregorian calendar date.
    Alternatively, a Julian date may be supplied directly via ``jd``::

        >>> t = ts.tt(2014, 1, 18, 1, 35, 37.5)
        >>> t.tt
        2456675.56640625
    """
    if jd is not None:
        tt = jd
    else:
        tt = julian_date(
            _to_array(year), _to_array(month), _to_array(day),
            _to_array(hour), _to_array(minute), _to_array(second),
        )
    return Time(self, _to_array(tt))
def isItemAllowed(self, obj):
    """Check whether the passed-in Analysis must be displayed in the list.

    :param obj: A single Analysis brain or content object
    :type obj: ATContentType/CatalogBrain
    :returns: True if the item can be added to the list.
    :rtype: bool
    """
    if not obj:
        return False
    # Retracted analyses are only visible to sufficiently privileged users.
    if obj.review_state == 'retracted' \
            and not self.has_permission(ViewRetractedAnalyses):
        return False
    return True
def list_subdomains(self, limit=None, offset=None):
    """Return a list of all subdomains for this domain.

    ``limit`` and ``offset`` are forwarded to the manager for paging.
    """
    mgr = self.manager
    return mgr.list_subdomains(self, limit=limit, offset=offset)
def remove_directories(list_of_paths):
    """Remove non-leaf entries from a list of directory paths.

    A path is dropped when it is a proper prefix (directory component) of
    another path in the list, or when it explicitly ends with ``'/'``.

    :param list_of_paths: iterable of '/'-separated path strings
    :returns: list of the leaf paths, in their original order
    """
    # Every proper ancestor directory of every path is a non-leaf.
    found_dirs = {'/'}
    for path in list_of_paths:
        parts = path.strip().split('/')
        for i in range(2, len(parts)):
            found_dirs.add('/'.join(parts[:i]))
    # Bug fix: the original indexed path.strip()[-1], which raises
    # IndexError on an empty/whitespace-only entry; endswith() is safe.
    return [
        path for path in list_of_paths
        if path.strip() not in found_dirs and not path.strip().endswith('/')
    ]
def get_date(self):
    """Collect the sensing date of the product.

    :return: Sensing date, formatted ``YYYY-M-D`` with leading zeros
        stripped from month and day
    :rtype: str
    """
    if self.safe_type == EsaSafeType.OLD_TYPE:
        token = self.product_id.split('_')[-2]
        parts = [token[1:5], token[5:7], token[7:9]]
    else:
        token = self.product_id.split('_')[2]
        parts = [token[:4], token[4:6], token[6:8]]
    return '-'.join(part.lstrip('0') for part in parts)
def lhs(self):
    """The left-hand side of the equation.

    When the current left-hand side is ``None``, walk backwards through
    the previously recorded left-hand sides until one is found.
    """
    result = self._lhs
    offset = 0
    while result is None:
        offset -= 1
        result = self._prev_lhs[offset]
    return result
def drawBernoulli(N, p=0.5, seed=0):
    """Generate arrays of booleans drawn from a simple Bernoulli distribution.

    Arguments
    ---------
    N : int
        Number of draws in each row.
    p : float or [float]
        Probability or probabilities of the event occurring (True).
    seed : int
        Seed for the random number generator.

    Returns
    -------
    draws : np.array or [np.array]
        A single boolean array of size N when ``p`` is a float, otherwise
        a list with one array of N Bernoulli draws per entry of ``p``.
    """
    rng = np.random.RandomState(seed)
    if isinstance(p, float):
        # Scalar probability: one array of N draws.
        return rng.uniform(size=N) < p
    # One array per probability, consuming consecutive RNG output.
    return [rng.uniform(size=N) < prob for prob in p]
def getinputfilename(self, inputtemplate, filename):
    """Determine the final filename for an input file.

    Given an input template and a local filename, return the name the
    file will get on the server.  Example::

        filenameonserver = client.getinputfilename("someinputtemplate", "/path/to/local/file")
    """
    if inputtemplate.filename:
        # The template dictates a fixed filename.
        return inputtemplate.filename
    if inputtemplate.extension:
        # Archives are passed through unchanged.
        if filename.lower().endswith(('.zip', '.tar.gz', '.tar.bz2')):
            return filename
        suffix = '.' + inputtemplate.extension
        if filename[-len(suffix):].lower() != suffix.lower():
            filename += suffix
    return filename
def _get_sts_token(self):
    """Assume a role via STS and return the credentials.

    First connect to STS via :py:func:`boto3.client`, then assume a role
    using ``self.account_id`` and ``self.account_role`` (and optionally
    ``self.external_id``, ``self.mfa_serial_number``, ``self.mfa_token``).
    Return the resulting :py:class:`~.ConnectableCredentials` object.

    :returns: STS assumed role credentials
    :rtype: :py:class:`~.ConnectableCredentials`
    """
    logger.debug("Connecting to STS in region %s", self.region)
    sts = boto3.client('sts', region_name=self.region)
    arn = "arn:aws:iam::%s:role/%s" % (self.account_id, self.account_role)
    logger.debug("STS assume role for %s", arn)
    assume_kwargs = {
        'RoleArn': arn,
        'RoleSessionName': 'awslimitchecker',
    }
    # Optional assume-role parameters are only sent when configured.
    optional = {
        'ExternalId': self.external_id,
        'SerialNumber': self.mfa_serial_number,
        'TokenCode': self.mfa_token,
    }
    assume_kwargs.update(
        {key: val for key, val in optional.items() if val is not None})
    role = sts.assume_role(**assume_kwargs)
    creds = ConnectableCredentials(role)
    creds.account_id = self.account_id
    logger.debug(
        "Got STS credentials for role; access_key_id=%s "
        "(account_id=%s)",
        creds.access_key, creds.account_id)
    return creds
def _set_linkinfo_isllink_srcport_type(self, v, load=False):
    """Setter method for linkinfo_isllink_srcport_type, mapped from the
    YANG variable /brocade_fabric_service_rpc/show_linkinfo/output/show_link_info/linkinfo_isl/linkinfo_isllink_srcport_type (interfacetype-type).

    If this variable is read-only (config: false) in the source YANG
    file, then _set_linkinfo_isllink_srcport_type is considered as a
    private method.  Backends looking to populate this variable should
    do so via calling thisObj._set_linkinfo_isllink_srcport_type()
    directly.

    YANG Description: Source port/interface type.  It can take the
    following values: Te - for 10G Ethernet ports, Fi - for Fibre
    Channel ports.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        new_value = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=unicode,
                restriction_dict={'pattern': u'Te|Fi', 'length': [u'2']},
            ),
            is_leaf=True,
            yang_name="linkinfo-isllink-srcport-type",
            rest_name="linkinfo-isllink-srcport-type",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=False,
            extensions={u'tailf-common': {u'info': u'Source port/interface type'}},
            namespace='urn:brocade.com:mgmt:brocade-fabric-service',
            defining_module='brocade-fabric-service',
            yang_type='interfacetype-type',
            is_config=True,
        )
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """linkinfo_isllink_srcport_type must be of a type compatible with interfacetype-type""",
            'defined-type': "brocade-fabric-service:interfacetype-type",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'Te|Fi', 'length': [u'2']}), is_leaf=True, yang_name="linkinfo-isllink-srcport-type", rest_name="linkinfo-isllink-srcport-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Source port/interface type'}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='interfacetype-type', is_config=True)""",
        })
    self.__linkinfo_isllink_srcport_type = new_value
    if hasattr(self, '_set'):
        self._set()
def _pfp__snapshot ( self , recurse = True ) :
"""Save off the current value of the field""" | if hasattr ( self , "_pfp__value" ) :
self . _pfp__snapshot_value = self . _pfp__value |
def extendbed(args):
    """%prog extend agpfile componentfasta

    Extend the components to fill the component range.  For example, a
    bed/gff3 file that was converted from the agp will contain only the
    BAC sequence intervals that are 'represented' - sometimes leaving the
    5` and 3` out (those that overlap with adjacent sequences).  This
    script fills up those ranges, potentially to make graphics for
    tiling path.
    """
    from jcvi.formats.sizes import Sizes
    p = OptionParser(extendbed.__doc__)
    p.add_option("--nogaps", default=False, action="store_true",
                 help="Do not print bed lines for gaps [default: %default]")
    p.add_option("--bed12", default=False, action="store_true",
                 help="Produce bed12 formatted output [default: %default]")
    p.add_option("--gff", default=False, action="store_true",
                 help="Produce gff3 formatted output. By default, ignores " +
                      " AGP gap lines. [default: %default]")
    p.set_outfile()
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    # If output format is GFF3, ignore AGP gap lines.
    if opts.gff:
        opts.nogaps = True
    agpfile, fastafile = args
    agp = AGP(agpfile)
    fw = must_open(opts.outfile, "w")
    if opts.gff:
        print("##gff-version 3", file=fw)
    # First pass: remember the original (thick) coordinates and group the
    # component lines by component id.
    ranges = defaultdict(list)
    thickCoords = []
    for a in agp:
        thickCoords.append((a.object_beg, a.object_end))
        if a.is_gap:
            continue
        ranges[a.component_id].append(a)
    # Second pass: extend the first and last component of each group by
    # the overhang falling outside the represented interval.
    sizes = Sizes(fastafile).mapping
    for accn, rr in ranges.items():
        alen = sizes[accn]
        first = rr[0]
        if first.orientation == "+":
            overhang = first.component_beg - 1
        else:
            overhang = alen - first.component_end
        first.object_beg -= overhang
        last = rr[-1]
        if last.orientation == "+":
            overhang = alen - last.component_end
        else:
            overhang = last.component_beg - 1
        last.object_end += overhang
    # Emit the (possibly extended) lines in the requested format.
    for a, (ts, te) in zip(agp, thickCoords):
        if opts.nogaps and a.is_gap:
            continue
        if opts.bed12:
            line = a.bedline
            a.object_beg, a.object_end = ts, te
            line += "\t" + a.bedextra
            print(line, file=fw)
        elif opts.gff:
            print(a.gffline(), file=fw)
        else:
            print(a.bedline, file=fw)
def html(self):
    """Return an HTML-formatted citation for this document."""
    template = (u'{authors}, {title}, {journal}, {volissue}, {pages}, '
                '({date}). {doi}.')
    au_link = ('<a href="https://www.scopus.com/authid/detail.url'
               '?origin=AuthorProfile&authorId={0}">{1}</a>')
    # Author list: "A, B and C", each linked to their Scopus profile.
    if len(self.authors) > 1:
        authors = u', '.join(
            au_link.format(a.auid, str(a.given_name) + ' ' + str(a.surname))
            for a in self.authors[0:-1])
        final = self.authors[-1]
        authors += (u' and ' + au_link.format(
            final.auid, str(final.given_name) + ' ' + str(final.surname)))
    else:
        only = self.authors[0]
        authors = au_link.format(
            only.auid, str(only.given_name) + ' ' + str(only.surname))
    title = u'<a href="{link}">{title}</a>'.format(
        link=self.scopus_url, title=self.title)
    jlink = ('<a href="https://www.scopus.com/source/sourceInfo.url'
             '?sourceId={sid}">{journal}</a>')
    journal = jlink.format(sid=self.source_id, journal=self.publicationName)
    volume = self.volume
    issue = self.issueIdentifier
    if volume and issue:
        volissue = u'<b>{0}({1})</b>'.format(volume, issue)
    elif volume:
        volissue = u'<b>{0}</b>'.format(volume)
    else:
        volissue = 'no volume'
    if self.pageRange:
        pages = u'p. {0}'.format(self.pageRange)
    elif self.startingPage:
        pages = u'p. {self.startingPage}'.format(self=self)
    elif self.article_number:
        pages = u'Art. No. {self.article_number}, '.format(self=self)
    else:
        pages = '(no pages found)'
    doi = '<a href="https://doi.org/{0}">doi:{0}</a>'.format(self.doi)
    # Explicit keyword args instead of the fragile ``format(**locals())``.
    result = template.format(authors=authors, title=title, journal=journal,
                             volissue=volissue, pages=pages,
                             date=self.coverDate, doi=doi)
    return result.replace('None', '')
def IsWalletTransaction(self, tx):
    """Verify whether a transaction belongs to the wallet.

    Args:
        tx (TransactionOutput): an instance of type
            neo.Core.TX.Transaction.TransactionOutput to verify.

    Returns:
        bool: True if transaction belongs to wallet, False if not.
    """
    # A transaction is ours when one of its outputs pays to one of our
    # contracts, or one of its scripts verifies against a contract.
    for key, contract in self._contracts.items():
        contract_hash = contract.ScriptHash.ToBytes()
        if any(output.ScriptHash.ToBytes() == contract_hash
               for output in tx.outputs):
            return True
        for script in tx.scripts:
            if script.VerificationScript and \
                    bytes(contract.Script) == script.VerificationScript:
                return True
    # Watch-only addresses count as well.
    for watch_script_hash in self._watch_only:
        if any(output.ScriptHash == watch_script_hash
               for output in tx.outputs):
            return True
        for script in tx.scripts:
            if Crypto.ToScriptHash(script.VerificationScript,
                                   unhex=False) == watch_script_hash:
                return True
    return False
def json(self, var, default=NOTSET, force=True):
    """Get an environment variable, parsed as a JSON string."""
    # Delegate to the generic getter, using ``json.loads`` as the cast.
    return self._get(var, default=default, cast=json.loads, force=force)
def engine(self):
    """Return an engine instance, creating it if it doesn't exist.

    Recreate the engine connection if it wasn't originally created by
    the current process (engines cannot be shared across forks).
    """
    pid = os.getpid()
    cached = SQLAlchemyTarget._engine_dict.get(self.connection_string)
    if not cached or cached.pid != pid:
        # Create (or re-create after a fork) the engine for this URL.
        new_engine = sqlalchemy.create_engine(
            self.connection_string,
            connect_args=self.connect_args,
            echo=self.echo,
        )
        SQLAlchemyTarget._engine_dict[self.connection_string] = \
            self.Connection(new_engine, pid)
    return SQLAlchemyTarget._engine_dict[self.connection_string].engine
def read_file(filepath: str) -> str:
    """Read a file and return its contents as a string.

    ``~`` is expanded, so both absolute and home-relative paths work.
    """
    expanded = os.path.expanduser(filepath)
    with open(expanded) as handle:
        return handle.read()
def missing_whitespace_after_import_keyword(logical_line):
    r"""Multiple imports in form from x import (a, b, c) should have space
    between import statement and parenthesised name list.

    Okay: from foo import (bar, baz)
    E275: from foo import(bar, baz)
    E275: from importable.module import(bar, baz)
    """
    indicator = ' import('
    if logical_line.startswith('from '):
        found = logical_line.find(indicator)
        if found > -1:
            # Report the offset of the opening parenthesis.
            pos = found + len(indicator) - 1
            yield pos, "E275 missing whitespace after keyword"
def get_comments_of_offer_per_page(self, offer_id, per_page=1000, page=1):
    """Get comments of an offer, one page at a time.

    :param offer_id: the offer id
    :param per_page: How many objects per page. Default: 1000
    :param page: Which page. Default: 1
    :return: list
    """
    return self._get_resource_per_page(
        resource=OFFER_COMMENTS,
        per_page=per_page,
        page=page,
        params={'offer_id': offer_id},
    )
def create(cls, client_payment_service_provider_certificate,
           client_payment_service_provider_certificate_chain,
           client_public_key_signature, custom_headers=None):
    """Register a payment service provider.

    :param client_payment_service_provider_certificate: Payment Services
        Directive 2 compatible QSEAL certificate
    :type client_payment_service_provider_certificate: str
    :param client_payment_service_provider_certificate_chain: Intermediate
        and root certificate belonging to the provided certificate.
    :type client_payment_service_provider_certificate_chain: str
    :param client_public_key_signature: The Base64 encoded signature of
        the public key provided during installation and with the
        installation token appended as a nonce.  Signed with the private
        key belonging to the QSEAL certificate.
    :type client_public_key_signature: str
    :type custom_headers: dict[str, str]|None
    :rtype: BunqResponseInt
    """
    headers = {} if custom_headers is None else custom_headers
    request_map = {
        cls.FIELD_CLIENT_PAYMENT_SERVICE_PROVIDER_CERTIFICATE:
            client_payment_service_provider_certificate,
        cls.FIELD_CLIENT_PAYMENT_SERVICE_PROVIDER_CERTIFICATE_CHAIN:
            client_payment_service_provider_certificate_chain,
        cls.FIELD_CLIENT_PUBLIC_KEY_SIGNATURE:
            client_public_key_signature,
    }
    request_map_string = cls._remove_field_for_request(
        converter.class_to_json(request_map))
    api_client = client.ApiClient(cls._get_api_context())
    response_raw = api_client.post(cls._ENDPOINT_URL_CREATE,
                                   request_map_string.encode(),
                                   headers)
    return BunqResponseInt.cast_from_bunq_response(
        cls._process_for_id(response_raw))
def check_indexes_all_same(indexes, message="Indexes are not equal."):
    """Check that a list of Index objects are all equal.

    Parameters
    ----------
    indexes : iterable[pd.Index]
        Iterable of indexes to check.

    Raises
    ------
    ValueError
        If the indexes are not all the same.
    """
    iterator = iter(indexes)
    try:
        first = next(iterator)
    except StopIteration:
        # Bug fix: the original leaked a bare StopIteration for an empty
        # iterable.  An empty collection is vacuously "all the same".
        return
    for other in iterator:
        same = (first == other)
        if not same.all():
            bad_loc = np.flatnonzero(~same)[0]
            raise ValueError(
                "{}\nFirst difference is at index {}: "
                "{} != {}".format(
                    message, bad_loc, first[bad_loc], other[bad_loc]),
            )
def gen_lock(self, lock_type='update', timeout=0, poll_interval=0.5):
    '''Set and automatically clear a lock'''
    if not isinstance(lock_type, six.string_types):
        raise GitLockError(
            errno.EINVAL,
            'Invalid lock_type \'{0}\''.format(lock_type))
    # Normalize the timeout to a non-negative integer.
    try:
        timeout = int(timeout)
    except ValueError:
        timeout = 0
    else:
        if timeout < 0:
            timeout = 0
    # Normalize the poll interval; never poll longer than the timeout.
    if not isinstance(poll_interval, (six.integer_types, float)) \
            or poll_interval < 0:
        poll_interval = 0.5
    if poll_interval > timeout:
        poll_interval = timeout
    lock_set = False
    try:
        time_start = time.time()
        while True:
            try:
                self._lock(lock_type=lock_type, failhard=True)
                lock_set = True
                yield
                # Stop once the lock has been yielded, to avoid further
                # attempts to re-establish it.
                break
            except (OSError, IOError, GitLockError) as exc:
                if not timeout or time.time() - time_start > timeout:
                    raise GitLockError(exc.errno, exc.strerror)
                log.debug(
                    'A %s lock is already present for %s remote '
                    '\'%s\', sleeping %f second(s)',
                    lock_type, self.role, self.id, poll_interval)
                time.sleep(poll_interval)
    finally:
        if lock_set:
            self.clear_lock(lock_type=lock_type)
def trial(log_dir=None, upload_dir=None, sync_period=None, trial_prefix="",
          param_map=None, init_logging=True):
    """Generate a trial within a with context.

    Creates a module-global ``Trial``, starts it, yields it to the
    caller, and guarantees it is closed and cleared afterwards.  Only
    one trial may exist at a time.
    """
    global _trial
    # pylint: disable=global-statement
    if _trial:
        # TODO: would be nice to stack crawl at creation time to report
        # where that initial trial was created, and that creation line
        # info is helpful to keep around anyway.
        raise ValueError("A trial already exists in the current context")
    local_trial = Trial(
        log_dir=log_dir,
        upload_dir=upload_dir,
        sync_period=sync_period,
        trial_prefix=trial_prefix,
        param_map=param_map,
        # Bug fix: previously hard-coded to True, silently ignoring the
        # caller's init_logging argument.
        init_logging=init_logging,
    )
    try:
        _trial = local_trial
        _trial.start()
        yield local_trial
    finally:
        # Always clear the global and close the trial, even on error.
        _trial = None
        local_trial.close()
def on_before_trading(self, date_time):
    """At market open: if a position is held, increment its holding-day
    counter, and reset the per-day 'done' flags on both legs."""
    for leg in (self.cta_call, self.cta_put):
        if leg['pos'] > 0:
            leg['days'] += 1
        leg['done'] = False
def loss_ratio_exceedance_matrix(self, loss_ratios):
    """Compute the LREM (Loss Ratio Exceedance Matrix).

    The matrix has one row per loss ratio and one column per IML; each
    cell holds the survival probability of that loss ratio under the
    column's mean loss ratio and standard deviation.
    """
    lrem = numpy.empty((len(loss_ratios), len(self.imls)))
    for row, loss_ratio in enumerate(loss_ratios):
        columns = zip(self.mean_loss_ratios, self.stddevs)
        for col, (mean_loss_ratio, stddev) in enumerate(columns):
            lrem[row, col] = self.distribution.survival(
                loss_ratio, mean_loss_ratio, stddev)
    return lrem
def tokenize(self, sentence, normalize=True, is_feature=False,
             is_surface=False, return_list=False,
             func_normalizer=text_preprocess.normalize_text):
    # type: (text_type, bool, bool, bool, bool, Callable[[str], text_type]) -> Union[List[text_type], TokenizedSenetence]
    """Return the tokenized result of ``sentence``.

    If ``return_list`` is True, a plain list (word stem / POS tuples) is
    returned; otherwise a ``TokenizedSenetence`` object is returned.
    """
    assert isinstance(normalize, bool)
    assert isinstance(sentence, text_type)
    normalized = func_normalizer(sentence)
    morphemes = self.call_juman_interface(normalized)
    token_objects = [
        self.__extract_morphological_information(
            mrph_object=morpheme,
            is_surface=is_surface,
            is_feature=is_feature,
        )
        for morpheme in morphemes
    ]
    tokenized = TokenizedSenetence(sentence=sentence,
                                   tokenized_objects=token_objects)
    return tokenized.convert_list_object() if return_list else tokenized
def phase_progeny_by_transmission(g):
    """Phase progeny genotypes from a trio or cross using Mendelian
    transmission.

    Parameters
    ----------
    g : array_like, int, shape (n_variants, n_samples, 2)
        Genotype array, with parents as first two columns and progeny as
        remaining columns.

    Returns
    -------
    g : ndarray, int8, shape (n_variants, n_samples, 2)
        Genotype array with progeny phased where possible, with the
        ``is_phased`` flag set accordingly.
    """
    # Copy into an int8 genotype array and validate ploidy/sample count.
    g = GenotypeArray(g, dtype='i1', copy=True)
    check_ploidy(g.ploidy, 2)
    check_min_samples(g.n_samples, 3)
    # Run the phasing.  The copy above means the buffer is already safe
    # to mutate in place (no memoryview-safety step needed).
    is_phased = _opt_phase_progeny_by_transmission(g.values)
    g.is_phased = np.asarray(is_phased).view(bool)
    return g
def traceit(frame, event, arg):
    """Trace every line of a program.  Use:

        import sys
        sys.settrace(traceit)

    :param frame: current stack frame
    :param event: trace event name (e.g. 'line', 'call')
    :param arg: event-specific argument
    :return: this function, so that tracing continues
    """
    import linecache
    try:
        if event == "line":
            lineno = frame.f_lineno
            filename = frame.f_globals.get("__file__")
            if filename.endswith((".pyc", ".pyo")):
                filename = filename[:-1]
            name = frame.f_globals["__name__"]
            line = linecache.getline(filename, lineno)
            # Only report lines from the 'ambry' package.
            if name.startswith('ambry'):
                # Bug fix: the original used a Python 2 print statement,
                # which is a syntax error under Python 3.
                print("%s:%s: %s" % (name, lineno, line.rstrip()))
    finally:
        # NOTE: returning from ``finally`` deliberately swallows any
        # exception raised while tracing, so tracing can never break the
        # traced program.
        return traceit
def unit_position(self) -> typing.Tuple[float, float]:
    """Return the unit's position as an ``(x, y)`` tuple."""
    return (self.unit_pos_x, self.unit_pos_y)
def unmangle_name(name, classname):
    """Return the "unmangled" version of ``name``.

    If ``name`` starts with ``classname`` and does not itself end in
    ``__``, the first ``len(classname) - 2`` characters are stripped
    (presumably ``classname`` carries a trailing ``__`` so the double
    underscore is kept — TODO confirm against callers); otherwise
    ``name`` is returned unchanged.
    """
    is_mangled = name.startswith(classname) and not name.endswith('__')
    if is_mangled:
        return name[len(classname) - 2:]
    return name
def loopalt_gtd(time: datetime,
                glat: Union[float, np.ndarray],
                glon: Union[float, np.ndarray],
                altkm: Union[float, List[float], np.ndarray], *,
                f107a: float = None, f107: float = None,
                Ap: int = None) -> xarray.Dataset:
    """Loop the single-point run over location and time.

    time: datetime or numpy.datetime64 or list/array of datetime
    glat: float or 2-D np.ndarray
    glon: float or 2-D np.ndarray
    altkm: float or list or 1-D np.ndarray
    """
    glat = np.atleast_2d(glat)
    glon = np.atleast_2d(glon)
    assert glat.ndim == glon.ndim == 2
    times = np.atleast_1d(time)
    assert times.ndim == 1
    atmos = xarray.Dataset()
    for t in times:
        print('computing', t)
        for i in range(glat.shape[0]):
            for j in range(glat.shape[1]):
                # Merge each single-point run into the growing dataset.
                atm = rungtd1d(t, altkm, glat[i, j], glon[i, j],
                               f107a=f107a, f107=f107, Ap=Ap)
                atmos = xarray.merge((atmos, atm))
    # Attributes of the last computed point apply to the whole dataset.
    atmos.attrs = atm.attrs
    return atmos
def wavenumber(src, rec, depth, res, freq, wavenumber, ab=11, aniso=None,
               epermH=None, epermV=None, mpermH=None, mpermV=None, verb=2):
    r"""Deprecated. Use `dipole_k` instead."""
    # Warn callers that this alias will be removed.
    mesg = ("\n The use of `model.wavenumber` is deprecated and will " +
            "be removed;\n use `model.dipole_k` instead.")
    warnings.warn(mesg, DeprecationWarning)
    return dipole_k(src, rec, depth, res, freq, wavenumber, ab, aniso,
                    epermH, epermV, mpermH, mpermV, verb)
def retrieve_split_adjustment_data_for_sid(self, dates, sid, split_adjusted_asof_idx):
    """Collect split adjustments for ``sid``, partitioned around the
    split-adjusted-asof date.

    dates : pd.DatetimeIndex
        The calendar dates.
    sid : int
        The sid for which we want to retrieve adjustments.
    split_adjusted_asof_idx : int
        The index in `dates` as-of which the data is split adjusted.

    Returns
    -------
    pre_adjustments : tuple(np.ndarray, np.ndarray)
        The adjustment values and indexes in `dates` for adjustments
        that happened before the split-asof-date.
    post_adjustments : tuple(np.ndarray, np.ndarray, pd.DatetimeIndex)
        The adjustment values, indexes in `dates`, and timestamps for
        adjustments that happened after the split-asof-date.
    """
    adjustments = self._split_adjustments.get_adjustments_for_sid('splits', sid)
    # BUG FIX: the original called `sorted(adjustments, key=...)` and
    # discarded the result, leaving the list in provider order. Sort in
    # place by adjustment timestamp.
    adjustments.sort(key=lambda adj: adj[0])
    # Get rid of any adjustments that happen outside of our date index.
    adjustments = [adj for adj in adjustments if dates[0] <= adj[0] <= dates[-1]]
    adjustment_values = np.array([adj[1] for adj in adjustments])
    timestamps = pd.DatetimeIndex([adj[0] for adj in adjustments])
    # We need the first date on which we would have known about each
    # adjustment.
    date_indexes = dates.searchsorted(timestamps)
    pre_adjustment_idxs = np.where(date_indexes <= split_adjusted_asof_idx)[0]
    last_adjustment_split_asof_idx = -1
    if len(pre_adjustment_idxs):
        last_adjustment_split_asof_idx = pre_adjustment_idxs.max()
    split = last_adjustment_split_asof_idx + 1
    pre_adjustments = (adjustment_values[:split], date_indexes[:split])
    post_adjustments = (adjustment_values[split:], date_indexes[split:], timestamps[split:])
    return pre_adjustments, post_adjustments
def get_all_tags_of_recurring(self, recurring_id):
    """Get all tags of a recurring.

    This iterates over all pages until it gets all elements, so if the
    rate limit is exceeded it will throw an exception and you will get
    nothing.

    :param recurring_id: the recurring id
    :return: list
    """
    # Pass the id as a plain keyword argument instead of unpacking a
    # single-entry dict (`**{...}`), which only obscured the call.
    return self._iterate_through_pages(
        get_function=self.get_tags_of_recurring_per_page,
        resource=RECURRING_TAGS,
        recurring_id=recurring_id,
    )
def _get(self, resource, payload=None):
    """Wrapper around requests.get that shortens the caller URL and
    takes care of errors."""
    # Avoid the dangerous mutable default argument `{}`; any falsy
    # payload becomes an empty dict, matching the original behavior.
    payload = payload or {}
    url = '{}/{}/{}'.format(self.master, pyconsul.__consul_api_version__, resource)
    # Build the request and return the JSON-capable response object.
    return requests.get(url, params=payload)
def from_location(cls, location):
    """Try to create a Ladybug location from a location string.

    Args:
        location: Location string, or an existing location-like object.

    Usage:
        l = Location.from_location(locationString)
    """
    # Empty / None input: fall back to a default Location.
    if not location:
        return cls()
    try:
        if hasattr(location, 'isLocation'):
            # Ladybug location: already the right type, pass it through.
            return location
        elif hasattr(location, 'Latitude'):
            # Revit's location object.
            return cls(city=str(location.Name.replace(",", " ")), latitude=location.Latitude, longitude=location.Longitude)
        elif location.startswith('Site:'):
            # EnergyPlus-style "Site:" block: pull the comma/semicolon
            # terminated fields out of the multi-line string.
            loc, city, latitude, longitude, time_zone, elevation = [x.strip() for x in re.findall(r'\r*\n*([^\r\n]*)[,|;]', location, re.DOTALL)]
        else:
            try:
                # Comma-separated "key: value" pairs.
                city, latitude, longitude, time_zone, elevation = [key.split(":")[-1].strip() for key in location.split(",")]
            except ValueError:
                # it's just the city name
                return cls(city=location)
        # NOTE(review): latitude/longitude/time_zone/elevation are passed
        # through as strings here — presumably the constructor coerces
        # them; confirm.
        return cls(city=city, country=None, latitude=latitude, longitude=longitude, time_zone=time_zone, elevation=elevation)
    except Exception as e:
        raise ValueError("Failed to create a Location from %s!\n%s" % (location, e))
def _prepare_instance_properties(self, properties, factory_properties):
    # type: (dict, dict) -> dict
    """Prepares the properties of a component instance, based on its
    configuration, factory and framework properties.

    :param properties: Component instance properties
    :param factory_properties: Component factory "default" properties
    :return: The merged properties
    """
    # Normalize the given properties to a dictionary (None or any
    # non-dict value becomes an empty dict).
    if not isinstance(properties, dict):
        properties = {}
    # Fill properties missing from the instance configuration with
    # framework-level values, when the framework defines them.
    framework = self.__context.get_framework()
    for name in factory_properties:
        if name in properties:
            continue
        value = framework.get_property(name)
        if value is not None:
            properties[name] = value
    return properties
def main():
    """Run the program: convert a tab/CSV/Excel input file to a gzipped,
    tab-separated output file.

    Unrecognized extensions print a JSON-style error line; corrupt files
    print an error marker and re-raise the underlying exception.
    """
    args = parse_arguments()
    ext = os.path.splitext(args.input_file)[-1].lower()
    # The plain-text formats only differ by their field delimiter; the
    # original duplicated the whole read loop per extension.
    delimiters = {'.tab': '\t', '.txt': '\t', '.tsv': '\t', '.csv': ','}
    with gzip.open(args.output_file, mode='wt') as outfile:
        csvwriter = csv.writer(outfile, delimiter=str('\t'), lineterminator='\n')
        try:
            if ext in delimiters:
                with open(args.input_file) as infile:
                    for line in infile:
                        csvwriter.writerow(line.strip().split(delimiters[ext]))
            elif ext in ('.xls', '.xlsx'):
                workbook = xlrd.open_workbook(args.input_file)
                worksheet = workbook.sheets()[0]
                for rownum in range(worksheet.nrows):
                    csvwriter.writerow(worksheet.row_values(rownum))
            else:
                print('{"proc.error":"File extension not recognized."}')
        except Exception:
            print('{"proc.error":"Corrupt or unrecognized file."}')
            raise
def alter(self, id_environment_vip, finalidade_txt, cliente_txt, ambiente_p44_txt, description):
    r"""Change an Environment VIP identified by its id.

    :param id_environment_vip: Identifier of the Environment VIP. Integer value and greater than zero.
    :param finalidade_txt: Finality. String with a maximum of 50 characters and respect [a-zA-Z\_-]
    :param cliente_txt: ID Client. String with a maximum of 50 characters and respect [a-zA-Z\_-]
    :param ambiente_p44_txt: Environment P44. String with a maximum of 50 characters and respect [a-zA-Z\_-]
    :param description: Description of the Environment VIP.
    :return: None
    :raise InvalidParameterError: Environment VIP identifier is null and invalid.
    :raise InvalidParameterError: The value of finalidade_txt, cliente_txt or ambiente_p44_txt is invalid.
    :raise EnvironmentVipNotFoundError: Environment VIP not registered.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    if not is_valid_int_param(id_environment_vip):
        raise InvalidParameterError(u'The identifier of Environment VIP is invalid or was not informed.')
    environmentvip_map = {
        'finalidade_txt': finalidade_txt,
        'cliente_txt': cliente_txt,
        'ambiente_p44_txt': ambiente_p44_txt,
        'description': description,
    }
    url = 'environmentvip/{}/'.format(id_environment_vip)
    code, xml = self.submit({'environment_vip': environmentvip_map}, 'PUT', url)
    return self.response(code, xml)
def condensed_coords_within(pop, n):
    """Return indices into a condensed distance matrix for all pairwise
    comparisons within the given population.

    Parameters
    ----------
    pop : array_like, int
        Indices of samples or haplotypes within the population.
    n : int
        Size of the square matrix (length of first or second dimension).

    Returns
    -------
    indices : list of int
    """
    indices = []
    # combinations() over the sorted members yields each unordered pair
    # exactly once with i < j.
    for i, j in itertools.combinations(sorted(pop), 2):
        indices.append(condensed_coords(i, j, n))
    return indices
def export_node(bpmn_graph, export_elements, node, nodes_classification, order=0, prefix="", condition="", who="", add_join=False):
    """General method for node exporting.

    :param bpmn_graph: an instance of BpmnDiagramGraph class,
    :param export_elements: a dictionary object; the key is a node ID,
        the value is a dictionary of parameters used in the exported CSV,
    :param node: networkx.Node object,
    :param nodes_classification: dictionary of classification labels
        (key - node id, value - a list of labels),
    :param order: the order param of the exported node,
    :param prefix: prefix of the exported node - identifies the branch
        when the task appears after some gateway,
    :param condition: the condition param of the exported node,
    :param who: the who param of the exported node,
    :param add_join: boolean flag indicating whether a "Join" element
        should be added to the CSV.
    :return: None, or the next node object if the exported node was a
        gateway join.
    """
    node_type = node[1][consts.Consts.type]
    # Start and end events have dedicated exporters; every other node
    # goes through the generic element exporter.
    if node_type == consts.Consts.start_event:
        return BpmnDiagramGraphCsvExport.export_start_event(
            bpmn_graph, export_elements, node, nodes_classification,
            order=order, prefix=prefix, condition=condition, who=who)
    if node_type == consts.Consts.end_event:
        return BpmnDiagramGraphCsvExport.export_end_event(
            export_elements, node, order=order, prefix=prefix,
            condition=condition, who=who)
    return BpmnDiagramGraphCsvExport.export_element(
        bpmn_graph, export_elements, node, nodes_classification,
        order=order, prefix=prefix, condition=condition, who=who,
        add_join=add_join)
def check_xsrf_cookie(self) -> None:
    """Verifies that the ``_xsrf`` cookie matches the ``_xsrf`` argument.

    To prevent cross-site request forgery, we set an ``_xsrf`` cookie and
    include the same value as a non-cookie field with all ``POST``
    requests. If the two do not match, we reject the form submission as a
    potential forgery.

    The ``_xsrf`` value may be set as either a form field named ``_xsrf``
    or in a custom HTTP header named ``X-XSRFToken`` or ``X-CSRFToken``
    (the latter is accepted for compatibility with Django).

    See http://en.wikipedia.org/wiki/Cross-site_request_forgery

    .. versionchanged:: 3.2.2
       Added support for cookie version 2. Both versions 1 and 2 are
       supported.
    """
    # Prior to release 1.1.1, this check was ignored if the HTTP header
    # ``X-Requested-With: XMLHTTPRequest`` was present. This exception
    # has been shown to be insecure and has been removed. For more
    # information please see
    # http://www.djangoproject.com/weblog/2011/feb/08/security/
    # http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails
    token = (self.get_argument("_xsrf", None) or self.request.headers.get("X-Xsrftoken") or self.request.headers.get("X-Csrftoken"))
    if not token:
        raise HTTPError(403, "'_xsrf' argument missing from POST")
    # Decode the submitted token and regenerate the expected one from the
    # cookie before comparing.
    _, token, _ = self._decode_xsrf_token(token)
    _, expected_token, _ = self._get_raw_xsrf_token()
    if not token:
        raise HTTPError(403, "'_xsrf' argument has invalid format")
    # Constant-time comparison to avoid leaking information through
    # timing differences.
    if not hmac.compare_digest(utf8(token), utf8(expected_token)):
        raise HTTPError(403, "XSRF cookie does not match POST argument")
def _from_dict(cls, _dict):
    """Initialize a DialogSuggestionValue object from a json dictionary."""
    # Map each recognized key to the converter that deserializes its value.
    converters = {
        'input': lambda val: MessageInput._from_dict(val),
        'intents': lambda val: [RuntimeIntent._from_dict(item) for item in val],
        'entities': lambda val: [RuntimeEntity._from_dict(item) for item in val],
    }
    args = {key: convert(_dict.get(key))
            for key, convert in converters.items()
            if key in _dict}
    return cls(**args)
def html_factory(tag, **defaults):
    """Return an :class:`Html` factory function for ``tag`` and a given
    dictionary of ``defaults`` parameters. For example::

        >>> input_factory = html_factory('input', type='text')
        >>> html = input_factory(value='bla')
    """
    def factory(*children, **params):
        # Per-call parameters override the factory defaults.
        merged = dict(defaults, **params)
        return Html(tag, *children, **merged)
    return factory
def save_asset(self, asset_form, *args, **kwargs):
    """Pass through to provider AssetAdminSession.update_asset.

    Dispatches to ``update_asset`` when the form targets an existing
    asset, otherwise to ``create_asset``.
    """
    # Implemented from kitosid template for
    # osid.resource.ResourceAdminSession.update_resource
    handler = self.update_asset if asset_form.is_for_update() else self.create_asset
    return handler(asset_form, *args, **kwargs)
def store(auth, provider, config_location=DEFAULT_CONFIG_DIR):
    """Store auth info in a file for the specified provider."""
    auth_file = None
    try:
        # Only needed for custom locations.
        _create_config_dir(config_location, "Creating custom config directory [%s]... ")
        config_dir = os.path.join(config_location, NOIPY_CONFIG)
        _create_config_dir(config_dir, "Creating directory [%s]... ")
        auth_file = os.path.join(config_dir, provider)
        print("Creating auth info file [%s]... " % auth_file, end="")
        with open(auth_file, 'w') as handle:
            handle.write(auth.base64key.decode('utf-8'))
        print("OK.")
    except IOError as e:
        print('{0}: "{1}"'.format(e.strerror, auth_file))
        raise e
def safe_print(msg):
    """Safely print a given Unicode string to stdout, possibly replacing
    characters non-printable in the current stdout encoding.

    :param string msg: the message
    """
    try:
        print(msg)
    except UnicodeEncodeError:
        try:
            # Round-trip through the shell encoding with "replace" so
            # that Python 3 prints a clean str instead of a b"..." repr.
            enc = sys.stdout.encoding
            print(msg.encode(enc, "replace").decode(enc, "replace"))
        except (UnicodeDecodeError, UnicodeEncodeError):
            print(u"[ERRO] An unexpected error happened while printing to stdout.")
            print(u"[ERRO] Please check that your file/string encoding matches the shell encoding.")
            print(u"[ERRO] If possible, set your shell encoding to UTF-8 and convert any files with legacy encodings.")
def _showItemContextMenu(self, item, point, col):
    """Callback for the contextMenuRequested() signal. Pops up the item's
    menu, if the item defines one."""
    self._startOrStopEditing()
    menu = getattr(item, '_menu', None)
    if not menu:
        self._current_item = None
        return
    # self._current_item tells callbacks which item the menu refers to.
    self._current_item = item
    self.clearSelection()
    self.setItemSelected(item, True)
    menu.exec_(point)
def create_ca(self, name, ca_name='', cert_type=crypto.TYPE_RSA, bits=2048, alt_names=None, years=5, serial=0, pathlen=0, overwrite=False):
    """Create a certificate authority.

    Arguments: name      - The name of the CA
               ca_name   - Name of a parent CA that signs this one; an
                           empty string creates a self-signed root.
               cert_type - The type of the cert. TYPE_RSA or TYPE_DSA
               bits      - The number of bits to use
               alt_names - An array of alternative names in the format:
                           IP:address, DNS:address
               years     - Validity period, in years.
               serial    - NOTE(review): accepted but never used in this
                           body (sign() is called with 0) — confirm.
               pathlen   - basicConstraints pathlen; negative omits it.
               overwrite - Overwrite an existing store record of the same
                           name.
    Returns:   KeyCertPair for the new CA
    """
    cakey = self.create_key_pair(cert_type, bits)
    req = self.create_request(cakey, CN=name)
    # Default to self-signing; replaced below when a parent CA is named.
    signing_key = cakey
    signing_cert = req
    parent_ca = ''
    if ca_name:
        ca_bundle = self.store.get_files(ca_name)
        signing_key = ca_bundle.key.load()
        signing_cert = ca_bundle.cert.load()
        parent_ca = ca_bundle.cert.file_path
    basicConstraints = "CA:true"
    # If pathlen is exactly 0, this CA cannot sign intermediaries.
    # A negative value leaves this out entirely and allows arbitrary
    # numbers of intermediates.
    if pathlen >= 0:
        basicConstraints += ', pathlen:' + str(pathlen)
    # The two lambdas defer building the subject/authority key
    # identifier extensions until the certificate object exists.
    extensions = [crypto.X509Extension(b"basicConstraints", True, basicConstraints.encode()), crypto.X509Extension(b"keyUsage", True, b"keyCertSign, cRLSign"), crypto.X509Extension(b"extendedKeyUsage", True, b"serverAuth, clientAuth"), lambda cert: crypto.X509Extension(b"subjectKeyIdentifier", False, b"hash", subject=cert), lambda cert: crypto.X509Extension(b"authorityKeyIdentifier", False, b"keyid:always", issuer=cert), ]
    if alt_names:
        extensions.append(crypto.X509Extension(b"subjectAltName", False, ",".join(alt_names).encode()))
    # TODO: start time before today for clock skew?
    cacert = self.sign(req, (signing_cert, signing_key), (0, 60 * 60 * 24 * 365 * years), extensions=extensions)
    # A CA is its own 'ca' entry in the stored bundle.
    x509s = {'key': cakey, 'cert': cacert, 'ca': cacert}
    self.store.add_files(name, x509s, overwrite=overwrite, parent_ca=parent_ca, is_ca=True)
    if ca_name:
        # Record the signing relationship between parent and child CA.
        self.store.add_sign_link(ca_name, name)
    return self.store.get_record(name)
def toggle_tbstyle(self, button):
    """Toggle the ToolButtonStyle of the given button between
    :data:`ToolButtonIconOnly` and :data:`ToolButtonTextBesideIcon`.

    :param button: a tool button
    :type button: :class:`QtGui.QToolButton`
    :returns: None
    :rtype: None
    :raises: None
    """
    icon_only = QtCore.Qt.ToolButtonIconOnly
    text_beside = QtCore.Qt.ToolButtonTextBesideIcon
    current = button.toolButtonStyle()
    button.setToolButtonStyle(text_beside if current == icon_only else icon_only)
def create_new(cls, mapreduce_id, shard_number):
    """Create new shard state.

    Args:
        mapreduce_id: unique mapreduce id as string.
        shard_number: shard number for which to create shard state.

    Returns:
        new instance of ShardState ready to put into datastore.
    """
    return cls(
        key_name=cls.shard_id_from_number(mapreduce_id, shard_number),
        mapreduce_id=mapreduce_id,
    )
def start(component, exact):
    # type: (str, str) -> None
    """Create a new release branch.

    Args:
        component (str):
            Version component to bump when creating the release. Can be
            *major*, *minor* or *patch*.
        exact (str):
            The exact version to set for the release. Overrides the
            component argument. This allows to re-release a version if
            something went wrong with the release upload.
    """
    version_file = conf.get_path('version_file', 'VERSION')
    develop = conf.get('git.devel_branch', 'develop')
    # Releases may only be cut from the development branch.
    common.assert_on_branch(develop)
    with conf.within_proj_dir():
        out = shell.run('git status --porcelain', capture=True).stdout
        lines = out.split(os.linesep)
        # Untracked files ('??' status) do not block a release.
        has_changes = any(not l.startswith('??') for l in lines if l.strip())
        if has_changes:
            log.info("Cannot release: there are uncommitted changes")
            exit(1)
    old_ver, new_ver = versioning.bump(component, exact)
    log.info("Bumping package version")
    log.info(" old version: <35>{}".format(old_ver))
    log.info(" new version: <35>{}".format(new_ver))
    with conf.within_proj_dir():
        # Create the release/<version> branch off the current HEAD and
        # commit the bumped version file.
        branch = 'release/' + new_ver
        common.git_checkout(branch, create=True)
        log.info("Creating commit for the release")
        shell.run('git add {ver_file} && git commit -m "{msg}"'.format(ver_file=version_file, msg="Releasing v{}".format(new_ver)))
def find_key(self, regex):
    """Attempts to find a single S3 key based on the passed regex.

    Given a regular expression, this method searches the S3 bucket for a
    matching key, and returns it if exactly 1 key matches. Otherwise,
    None is returned.

    :param regex: (str) Regular expression for an S3 key
    :return: (str) Full length S3 key matching the regex, None otherwise
    """
    log = logging.getLogger(self.cls_logger + '.find_key')
    # NOTE(review): `basestring` exists only on Python 2; under Python 3
    # this line raises NameError unless the module aliases it — confirm
    # the target interpreter.
    if not isinstance(regex, basestring):
        log.error('regex argument is not a string')
        return None
    log.info('Looking up a single S3 key based on regex: %s', regex)
    matched_keys = []
    # Scan every object in the bucket and collect all matching keys.
    for item in self.bucket.objects.all():
        log.debug('Checking if regex matches key: %s', item.key)
        match = re.search(regex, item.key)
        if match:
            matched_keys.append(item.key)
    # Only an unambiguous (single) match is returned.
    if len(matched_keys) == 1:
        log.info('Found matching key: %s', matched_keys[0])
        return matched_keys[0]
    elif len(matched_keys) > 1:
        log.info('Passed regex matched more than 1 key: %s', regex)
        return None
    else:
        log.info('Passed regex did not match any key: %s', regex)
        return None
def _name_things(self):
    """Build human-readable debug names for every node and edge of the
    tree.

    Returns a 4-tuple (edges, nodes, r_edges, r_nodes); the ``r_*`` maps
    go from the generated name back to the object.
    """
    node_names = {None: 'root'}
    # Name each node by the dotted taxa of the leaves beneath it.
    for node in self._tree.postorder_node_iter():
        node_names[node] = '.'.join(str(leaf.taxon) for leaf in node.leaf_nodes())
    edge_names = {}
    # Name each edge "tail ---> head" using the node names above.
    for edge in self._tree.preorder_edge_iter():
        edge_names[edge] = node_names[edge.tail_node] + ' ---> ' + node_names[edge.head_node]
    r_edges = {label: edge for edge, label in edge_names.items()}
    r_nodes = {label: node for node, label in node_names.items()}
    return edge_names, node_names, r_edges, r_nodes
def seek(self, partition, offset):
    """Manually specify the fetch offset for a TopicPartition.

    Overrides the fetch offsets that the consumer will use on the next
    :meth:`~kafka.KafkaConsumer.poll`. If this API is invoked for the
    same partition more than once, the latest offset will be used on the
    next :meth:`~kafka.KafkaConsumer.poll`.

    Note: You may lose data if this API is arbitrarily used in the
    middle of consumption to reset the fetch offsets.

    Arguments:
        partition (TopicPartition): Partition for seek operation
        offset (int): Message offset in partition

    Raises:
        AssertionError: If offset is not an int >= 0; or if partition is
            not currently assigned.
    """
    if not isinstance(partition, TopicPartition):
        raise TypeError('partition must be a TopicPartition namedtuple')
    valid_offset = isinstance(offset, int) and offset >= 0
    assert valid_offset, 'Offset must be >= 0'
    assigned = self._subscription.assigned_partitions()
    assert partition in assigned, 'Unassigned partition'
    log.debug("Seeking to offset %s for partition %s", offset, partition)
    self._subscription.assignment[partition].seek(offset)
def solve(self):
    """Solve the pair (F2L slot): rotate it into the FR slot, then search
    U-layer setups combined with basic insertions until the pair reaches
    its estimated (solved) position.

    Returns the full formula applied on success.
    NOTE(review): falls through and returns None when no combination
    matches — confirm callers handle that.
    """
    cycle = ["FR", "RB", "BL", "LF"]
    combine = self.combine()
    # Rotate the whole cube with y-turns until the target slot is at FR.
    # NOTE(review): this parses as `(Step("y") * index) or []` — the
    # `or []` only applies when the product is falsy (index 0); confirm
    # that is the intent.
    put = Formula(Step("y") * cycle.index(self.pair) or [])
    self.cube(put)
    self.pair = "FR"
    estimated = self.estimated_position()
    # Try every U-layer setup with each of the six basic insertions,
    # undoing every attempt that does not reach the estimated position.
    for U_act in [Formula(), Formula("U"), Formula("U2"), Formula("U'")]:
        self.cube(U_act)
        for put_act in [Formula("R U R'"), Formula("R U' R'"), Formula("R U2 R'"), Formula("F' U F"), Formula("F' U' F"), Formula("F' U2 F")]:
            self.cube(put_act)
            if self.get_pair() == estimated:
                return combine + put + U_act + put_act
            self.cube(put_act.reverse())
        self.cube(U_act.reverse())
def fill_sampling(slice_list, N):
    """Given a list of slices, draw N samples such that each slice
    contributes as much as possible.

    Parameters
    ----------
    slice_list : list of Slice
        List of slices; each must expose an ``inliers`` sequence.
    N : int
        Number of samples to draw.

    Returns
    -------
    list of np.ndarray
        One array of chosen inlier values per slice.

    Raises
    ------
    ValueError
        If N exceeds the total number of available inliers, or if the
        allocation loop fails to distribute exactly N samples.
    """
    group_sizes = [len(s.inliers) for s in slice_list]
    N_max = np.sum(group_sizes)
    if N > N_max:
        raise ValueError("Tried to draw {:d} samples from a pool of only {:d} items".format(N, N_max))
    # Number of samples to draw from each group.
    samples_from = np.zeros((len(group_sizes),), dtype='int')
    remaining = N
    while remaining > 0:
        remaining_groups = np.flatnonzero(samples_from - np.array(group_sizes))
        if remaining < len(remaining_groups):
            # Fewer samples than unfilled groups: give one each to a
            # random subset of the groups.
            np.random.shuffle(remaining_groups)
            for g in remaining_groups[:remaining]:
                samples_from[g] += 1
        else:
            # Give each group the allowed number of samples, constrained
            # to its maximum size.
            to_each = max(1, int(remaining / len(remaining_groups)))
            samples_from = np.min(np.vstack((samples_from + to_each, group_sizes)), axis=0)
        # Update remaining count.
        remaining = int(N - np.sum(samples_from))
    if remaining != 0:
        # BUG FIX: the original string lacked the .format() call, so the
        # "{:d}" placeholder was raised verbatim.
        raise ValueError("Still {:d} samples left! This is an error in the selection.".format(remaining))
    # Construct the list of selected samples, one array per slice.
    samples = []
    for s, size, n in zip(slice_list, group_sizes, samples_from):
        if size == n:
            # Take every inlier.
            samples.append(np.array(s.inliers))
        elif size == 0:
            # BUG FIX: the original called np.arange([]), which raises a
            # TypeError; an empty integer array is the intended value.
            # (Defensive: size == 0 implies n == 0, handled above.)
            samples.append(np.array([], dtype=int))
        else:
            chosen = np.random.choice(s.inliers, n, replace=False)
            samples.append(np.array(chosen))
    return samples
def validate(self):
    """Base validation, plus: every diagonal cell must be a DSM or MDM,
    every off-diagonal cell a DMM or MDM."""
    super().validate()
    message_dsm = ('Matrix at [%s:%s] is not an instance of '
                   'DesignStructureMatrix or MultipleDomainMatrix.')
    message_ddm = ('Matrix at [%s:%s] is not an instance of '
                   'DomainMappingMatrix or MultipleDomainMatrix.')
    problems = []
    for i, row in enumerate(self.data):
        for j, cell in enumerate(row):
            # Diagonal cells and off-diagonal cells have different
            # accepted types and different error messages.
            if i == j:
                accepted = (DesignStructureMatrix, MultipleDomainMatrix)
                template = message_dsm
            else:
                accepted = (DomainMappingMatrix, MultipleDomainMatrix)
                template = message_ddm
            if not isinstance(cell, accepted):
                problems.append(template % (i, j))
    if problems:
        raise self.error('\n'.join(problems))
def function_from_string(func_string):
    """Resolve a dotted-path string to the function object it names.

    :param func_string: dotted path to resolve, e.g. ``"pkg.mod.func"``.
    :return: the resolved attribute, or None when resolution fails.
    """
    # Everything before the last dot is the module path; the rest is the
    # attribute name.
    module_path, _, attr_name = func_string.rpartition('.')
    module = import_module(module_path)
    if module and attr_name:
        return getattr(module, attr_name)
    return None
async def main():
    """Command-line entry point: crawl a URL to a given depth and print
    the discovered links plus crawl statistics.

    NOTE(review): declared ``async`` but contains no ``await`` — the
    crawl runs synchronously; confirm this is intentional.
    """
    opts, args = option_parser()
    url = args[0]
    if opts.links:
        # --links mode: just list the links of the page and exit.
        getlinks(url)
        raise SystemExit(0)
    depth = opts.depth
    # Time the whole crawl for the statistics report below.
    sTime = time.time()
    webcrawler = Webcrawler(url, depth)
    webcrawler.crawl()
    eTime = time.time()
    tTime = eTime - sTime
    print("CRAWLER STARTED:")
    print("%s, will crawl upto depth %d" % (url, depth))
    print("*****RESULTS")
    print("\n".join(webcrawler.urls))
    print("=" * 100)
    print("Crawler Statistics")
    print("=" * 100)
    print("No of links Found: %d" % webcrawler.links)
    print("No of followed: %d" % webcrawler.followed)
    print("Time Stats : Found all links after %0.2fs" % tTime)
def is_network_source_fw(cls, nwk, nwk_name):
    """Check if the network SOURCE is FIREWALL; if yes return True.

    If ``nwk`` is None (entry not in the network DB), fall back to the
    name: it must contain one of the service-network constants AND have
    the expected constant-plus-suffix length.
    """
    if nwk is not None:
        return nwk.source == fw_const.FW_CONST
    # BUG FIX: the original tested `nwk_name in CONSTANT`, i.e. whether
    # the (longer) name is a substring of the (shorter) constant — which
    # can never hold given the length requirement below. The intended
    # check is that the constant appears in the name.
    for marker in (fw_const.DUMMY_SERVICE_NWK,
                   fw_const.IN_SERVICE_NWK,
                   fw_const.OUT_SERVICE_NWK):
        if marker in nwk_name and (
                len(nwk_name) == len(marker) + fw_const.SERVICE_NAME_EXTRA_LEN):
            return True
    return False
def sorted_outrows(outrows):
    # type: (Iterable[InstalledCSVRow]) -> List[InstalledCSVRow]
    """Return the given rows of a RECORD file in sorted order.

    Each row is a 3-tuple (path, hash, size) and corresponds to a record
    of a RECORD file (see PEP 376 and PEP 427 for details). For the rows
    passed to this function, the size can be an integer as an int or
    string, or the empty string.
    """
    # Normally there is one row per path, so the second and third
    # elements don't matter for sorting. Paths do occur twice in the
    # wild, though, and the size may be an int or a str — stringifying
    # every element keeps the comparison deterministic and avoids a
    # TypeError. See https://github.com/pypa/pip/issues/5868
    return sorted(outrows, key=lambda row: tuple(map(str, row)))
def _compute_standard_dev ( self , rup , imt , C ) :
"""Compute the the standard deviation in terms of magnitude
described on page 744 , eq . 4""" | sigma_mean = 0.
if imt . name in "SA PGA" :
psi = - 6.898E-3
else :
psi = - 3.054E-5
if rup . mag <= 6.5 :
sigma_mean = ( C [ 'c12' ] * rup . mag ) + C [ 'c13' ]
elif rup . mag > 6.5 :
sigma_mean = ( psi * rup . mag ) + C [ 'c14' ]
return sigma_mean |
def mac_address_table_static_mac_address(self, **kwargs):
    """Auto Generated Code.

    Builds the XML config tree for a static MAC address-table entry and
    hands it to the callback (defaults to ``self._callback``).
    """
    config = ET.Element("config")
    mac_address_table = ET.SubElement(config, "mac-address-table", xmlns="urn:brocade.com:mgmt:brocade-mac-address-table")
    static = ET.SubElement(mac_address_table, "static")
    # (XML tag, kwargs key) pairs, emitted in schema order.
    fields = (
        ("forward", "forward"),
        ("interface-type", "interface_type"),
        ("interface-name", "interface_name"),
        ("vlan", "vlan"),
        ("vlanid", "vlanid"),
        ("mac-address", "mac_address"),
    )
    for tag, key in fields:
        ET.SubElement(static, tag).text = kwargs.pop(key)
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_icohp_dict_of_site(self, site, minsummedicohp=None, maxsummedicohp=None, minbondlength=0.0, maxbondlength=8.0, only_bonds_to=None):
    """Get a dict of IcohpValue for a certain site (indicated by integer).

    Args:
        site: integer describing the site of interest, ordered as in
            Icohplist.lobster/Icooplist.lobster, starts at 0
        minsummedicohp: float, minimal icohp/icoop of the bonds that are
            considered. It is the summed ICOHP value from both spin
            channels for spin polarized cases
        maxsummedicohp: float, maximal icohp/icoop of the bonds that are
            considered. It is the summed ICOHP value from both spin
            channels for spin polarized cases
        minbondlength: float, defines the minimum of the bond lengths
        maxbondlength: float, defines the maximum of the bond lengths
        only_bonds_to: list of strings describing the bonding partners
            that are allowed, e.g. ['O']

    Returns:
        dict of IcohpValues; the keys correspond to the values from the
        initial list_labels.
    """
    newicohp_dict = {}
    for key, value in self._icohplist.items():
        # Atom labels look like "O1": split off the trailing integer and
        # convert to 0-based site numbers.
        atomnumber1 = int(re.split(r'(\d+)', value._atom1)[1]) - 1
        atomnumber2 = int(re.split(r'(\d+)', value._atom2)[1]) - 1
        if site == atomnumber1 or site == atomnumber2:
            # manipulate order of atoms so that searched one is always atom1
            # NOTE(review): this mutates the stored IcohpValue objects in
            # place — the swap persists after this call; confirm intended.
            if site == atomnumber2:
                save = value._atom1
                value._atom1 = value._atom2
                value._atom2 = save
            if only_bonds_to is None:
                second_test = True
            else:
                # The partner atom's element symbol must be allowed.
                second_test = (re.split(r'(\d+)', value._atom2)[0] in only_bonds_to)
            if value._length >= minbondlength and value._length <= maxbondlength and second_test:
                # Apply the optional summed-ICOHP window; each bound is
                # only enforced when its argument was given.
                if minsummedicohp is not None:
                    if value.summed_icohp >= minsummedicohp:
                        if maxsummedicohp is not None:
                            if value.summed_icohp <= maxsummedicohp:
                                newicohp_dict[key] = value
                        else:
                            newicohp_dict[key] = value
                else:
                    if maxsummedicohp is not None:
                        if value.summed_icohp <= maxsummedicohp:
                            newicohp_dict[key] = value
                    else:
                        newicohp_dict[key] = value
    return newicohp_dict
def load_from_string(self, content, container, **kwargs):
    """Load config from given string 'content'.

    Abstract hook: concrete backends must override this; the base
    implementation delegates to ``_not_implemented``.

    :param content: Config content string
    :param container: callable to make a container object later
    :param kwargs: optional keyword parameters to be sanitized :: dict
    :return: Dict-like object holding config parameters
    """
    _not_implemented(self, content, container, **kwargs)
def doi_to_wos(wosclient, doi):
    """Convert a DOI to a WOS identifier.

    :param wosclient: WOS client used for the lookup query.
    :param doi: the DOI to convert.
    :return: the identifier without its "WOS:" prefix, or None when the
        query yields no result.
    """
    results = query(wosclient, 'DO="%s"' % doi, './REC/UID', count=1)
    if not results:
        return None
    uid = results[0]
    # BUG FIX: str.lstrip('WOS:') strips any leading 'W'/'O'/'S'/':'
    # characters (a character set, not a prefix), mangling identifiers
    # whose payload starts with those letters; remove the exact prefix.
    return uid[4:] if uid.startswith('WOS:') else uid
def __skip_this(self, level):
    """Check whether this comparison should be skipped because one of the
    objects to compare meets exclusion criteria.

    :param level: comparison level exposing ``path()``, ``t1`` and ``t2``
    :rtype: bool
    """
    # Exact path exclusion.
    if self.exclude_paths and level.path() in self.exclude_paths:
        return True
    # Regex path exclusion (generator instead of a throwaway list).
    if self.exclude_regex_paths and any(
            pattern.search(level.path()) for pattern in self.exclude_regex_paths):
        return True
    # Type-based exclusion applies when neither path rule matched.
    if self.exclude_types_tuple and (
            isinstance(level.t1, self.exclude_types_tuple)
            or isinstance(level.t2, self.exclude_types_tuple)):
        return True
    return False
def get_screen_density(self) -> str:
    '''Show device screen density (PPI).'''
    # `wm density` prints e.g. "Physical density: 420"; the value is the
    # third whitespace-separated token.
    stdout, _ = self._execute('-s', self.device_sn, 'shell', 'wm', 'density')
    tokens = stdout.split()
    return tokens[2]
def GetServices ( ) :
"""Obtains all the connected eDNA services .
: return : A pandas DataFrame of connected eDNA services in the form [ Name ,
Description , Type , Status ] ; an empty DataFrame ( plus a warning ) when no
services are connected .""" | # Define all required variables in the correct ctypes format .
# Two sets of output buffers are used : the first for DnaGetServiceEntry ,
# the second reused across every DnaGetNextServiceEntry iteration .
pulKey = c_ulong ( 0 )
szType = c_char_p ( "" . encode ( 'utf-8' ) )
szStartSvcName = c_char_p ( "" . encode ( 'utf-8' ) )
szSvcName , szSvcDesc = create_string_buffer ( 30 ) , create_string_buffer ( 90 )
szSvcType , szSvcStat = create_string_buffer ( 30 ) , create_string_buffer ( 30 )
szSvcName2 , szSvcDesc2 = create_string_buffer ( 30 ) , create_string_buffer ( 90 )
szSvcType2 , szSvcStat2 = create_string_buffer ( 30 ) , create_string_buffer ( 30 )
# Buffer capacities handed to the DLL alongside each buffer .
nSvcName , nSvcDesc = c_ushort ( 30 ) , c_ushort ( 90 )
nSvcType , nSvcStat = c_ushort ( 30 ) , c_ushort ( 30 )
# Call the eDNA function . nRet is zero if the function is successful .
services = [ ]
nRet = dna_dll . DnaGetServiceEntry ( szType , szStartSvcName , byref ( pulKey ) , byref ( szSvcName ) , nSvcName , byref ( szSvcDesc ) , nSvcDesc , byref ( szSvcType ) , nSvcType , byref ( szSvcStat ) , nSvcStat )
serv = _FormatServices ( szSvcName , szSvcDesc , szSvcType , szSvcStat )
if serv :
services . append ( serv )
# Iterate across all the returned services
while nRet == 0 :
# NOTE ( review ) : pulKey is passed by value here but via byref ( ) in the
# first call above - - confirm against the eDNA API whether
# byref ( pulKey ) is required for the iteration cursor .
nRet = dna_dll . DnaGetNextServiceEntry ( pulKey , byref ( szSvcName2 ) , nSvcName , byref ( szSvcDesc2 ) , nSvcDesc , byref ( szSvcType2 ) , nSvcType , byref ( szSvcStat2 ) , nSvcStat )
# We want to ensure only UTF - 8 characters are returned . Ignoring
# characters is slightly unsafe , but they should only occur in the
# units or description , so it ' s not a huge issue .
serv = _FormatServices ( szSvcName2 , szSvcDesc2 , szSvcType2 , szSvcStat2 )
if serv :
services . append ( serv )
# If no results were returned , raise a warning
df = pd . DataFrame ( )
if services :
df = pd . DataFrame ( services , columns = [ "Name" , "Description" , "Type" , "Status" ] )
else :
warnings . warn ( "WARNING- No connected eDNA services detected. Check " + "your DNASys.ini file and your network connection." )
return df
def cut(self, bits, start=None, end=None, count=None):
    """Generate contiguous, bits-sized chunks of this bitstring.

    bits  -- The size in bits of each chunk to generate.
    start -- The bit position to start the first cut. Defaults to 0.
    end   -- The bit position one past the last bit to use in the cut.
             Defaults to self.len.
    count -- If specified then at most count items are generated.
             Default is to cut as many times as possible.

    A trailing chunk shorter than *bits* is never yielded.
    """
    start, end = self._validate_slice(start, end)
    if count is not None and count < 0:
        raise ValueError("Cannot cut - count must be >= 0.")
    if bits <= 0:
        raise ValueError("Cannot cut - bits must be >= 0.")
    produced = 0
    position = start
    while count is None or produced < count:
        produced += 1
        chunk = self._slice(position, min(position + bits, end))
        if chunk.len != bits:
            # Ran out of whole chunks.
            return
        assert chunk._assertsanity()
        yield chunk
        position += bits
    return
def trace_error ( function_index = 2 ) :
"""This will return the line number and line text of the last error .
: param function _ index : int to tell what frame to look from
: return : ( str , str ) line number and line text of the matching frame , or
( None , None ) when no matching frame is found""" | info = function_info ( function_index )
traces = traceback . format_stack ( limit = 10 )
for trace in traces :
# Each formatted frame looks like ' File " < path > " , line N , in < fn > ' ;
# split it into the file part , the line part and the remainder .
file_ , line_number , line_text = trace . split ( ',' , 2 )
# NOTE ( review ) : format _ stack ( ) entries normally begin with TWO spaces
# before ' File ' ; this comparison uses a single leading space - -
# confirm it actually matches on the target Python version .
if file_ == ' File "%s"' % info [ 'file' ] and line_number != 'line %s' % info [ 'line_number' ] :
return line_number . split ( ) [ - 1 ] , line_text . strip ( )
return None , None
def get_minimum ( self ) :
'''Return the minimum recorded so far .
Note : despite the historical " triple " wording , this returns a 4 - tuple
( t1 , t2 , value , data ) where ` value ` is the minimal recorded value and
` data ` is the payload stored alongside it .''' | return ( self . _min_value_t1 , self . _min_value_t2 , self . _min_value , self . _min_value_data )
def bibtex_run(self):
    '''Run bibtex once for the project and back up the .bib file.'''
    self.log.info('Running bibtex...')
    try:
        # Silence bibtex's stdout entirely.
        with open(os.devnull, 'w') as devnull:
            process = Popen(['bibtex', self.project_name], stdout=devnull)
            process.wait()
    except OSError:
        # bibtex binary missing or not executable.
        self.log.error(NO_LATEX_ERROR % 'bibtex')
        sys.exit(1)
    shutil.copy('%s.bib' % self.bib_file, '%s.bib.old' % self.bib_file)
def uninstall_all_passbands(local=True):
    """Uninstall all passbands, either globally or locally (need to call
    twice to delete ALL passbands).

    If local=False, you must have permission to access the installation
    directory.
    """
    target_dir = _pbdir_local if local else _pbdir_global
    for entry in os.listdir(target_dir):
        entry_path = os.path.join(target_dir, entry)
        logger.warning("deleting file: {}".format(entry_path))
        os.remove(entry_path)
def create_order ( self , debtor , is_vat_included = True , due_date = None , heading = '' , text_line1 = '' , text_line2 = '' , debtor_data = None , delivery_data = None , products = None , project = None , other_reference = '' , model = models . Order , ** extra ) :
"""Create a new Order .
Args :
debtor ( Debtor ) : the debtor of the order
is _ vat _ included ( bool ) : whether the amounts include VAT
due _ date ( datetime ) : due date ; defaults to now
heading ( string ) : heading to be displayed in the order pdf
text _ line1 ( string ) : first order description line
text _ line2 ( string ) : second order description line
debtor _ data ( mapping ) : map of debtor data { ' postal _ code ' : . . , ' city ' : . . , ' ean ' : . . }
defaults to values on the debtor instance for missing values
delivery _ data ( mapping ) : map of delivery data { ' address ' : . . . , ' postal _ code ' : . . . }
defaults to values on the debtor instance for missing values ; may
also carry a ' date ' key used as the delivery date
products ( list ) : products added as order lines via create _ orderline
project : optional project to attach to the order
other _ reference ( string ) : custom string to be used for identification
model : the Order model class to instantiate
extra ( mapping ) : extra values passed through to the server call ;
applied last , so they override any value computed here
Returns :
Order instance""" | debtor_data = debtor_data or { }
delivery_data = delivery_data or { }
delivery_date = delivery_data . get ( 'date' , datetime . datetime . now ( ) )
# Fall back to the debtor ' s own defaults unless overridden via ` extra ` .
our_reference = extra . get ( 'our_reference' , debtor . our_reference )
currency = extra . get ( 'currency' , debtor . currency )
layout = extra . get ( 'layout' , debtor . layout )
term_of_payment = extra . get ( 'term_of_payment' , debtor . term_of_payment )
date = extra . get ( 'date' , datetime . datetime . now ( ) )
order_input = { 'debtor' : debtor , 'number' : extra . get ( 'number' , 1 ) , 'project' : project , }
# Debtor fields default to the attributes on the debtor instance .
for dd in [ 'name' , 'address' , 'postal_code' , 'city' , 'country' , 'ean' ] :
order_input [ 'debtor_%s' % dd ] = debtor_data . get ( dd , getattr ( debtor , dd ) )
# Delivery fields likewise default to the debtor ' s own address data .
for dd in [ 'address' , 'postal_code' , 'city' , 'country' ] :
order_input [ 'delivery_%s' % dd ] = delivery_data . get ( dd , getattr ( debtor , dd ) )
order_input . update ( { 'delivery_date' : delivery_date or datetime . datetime . now ( ) , 'heading' : heading , 'text_line1' : text_line1 , 'text_line2' : text_line2 , 'is_archived' : extra . get ( 'is_archived' , 0 ) , 'is_sent' : extra . get ( 'is_sent' , 0 ) , 'net_amount' : extra . get ( 'net_amount' , 0 ) , 'vat_amount' : extra . get ( 'vat_amount' , 0 ) , 'gross_amount' : extra . get ( 'gross_amount' , 0 ) , 'margin' : extra . get ( 'margin' , 0 ) , 'margin_as_percent' : extra . get ( 'margin_as_percent' , 0 ) , 'date' : date , 'our_reference' : our_reference , 'other_reference' : other_reference , 'currency' : currency , 'exchange_rate' : extra . get ( 'exchange_rate' , 1.0 ) , 'is_vat_included' : is_vat_included , 'layout' : layout , 'due_date' : due_date or datetime . datetime . now ( ) , 'term_of_payment' : term_of_payment } )
# ` extra ` wins over everything computed above .
order_input . update ( extra )
order = self . create ( model , ** order_input )
if products :
for product in products :
self . create_orderline ( order , product )
return order
def set_configs(self, key, d):
    """Store the complete configuration dict *d* for *key*."""
    # Create the '_config' mapping on first use; afterwards assign into it.
    if '_config' not in self.proxy:
        self.proxy['_config'] = {key: d}
    else:
        self.proxy['_config'][key] = d
def _make_parameters(self):
    """Converts a list of Parameters into DEAP format.

    Populates ``self.value_means``, ``self.value_ranges``,
    ``self.arrangement`` and ``self.variable_parameters`` from
    ``self.parameters``.

    :raises AttributeError: if a dynamic parameter has a negative range,
        or a parameter has an unknown type.
    """
    self.value_means = []
    self.value_ranges = []
    self.arrangement = []
    self.variable_parameters = []
    current_var = 0
    for parameter in self.parameters:
        if parameter.type == ParameterType.DYNAMIC:
            self.value_means.append(parameter.value[0])
            if parameter.value[1] < 0:
                raise AttributeError(
                    '"{}" parameter has an invalid range. Range values '
                    'must be greater than zero'.format(parameter.label))
            self.value_ranges.append(parameter.value[1])
            var_label = 'var{}'.format(current_var)
            self.arrangement.append(var_label)
            self.variable_parameters.append(var_label)
            current_var += 1
        elif parameter.type == ParameterType.STATIC:
            self.arrangement.append(parameter.value)
        else:
            # Bug fix: the message has two '{}' placeholders but only one
            # argument was supplied, so .format() itself raised IndexError
            # instead of this AttributeError.
            raise AttributeError(
                '"{}"Unknown parameter type ({}). Parameters can be STATIC or'
                ' DYNAMIC.'.format(parameter.label, parameter.type))
    return
def choose ( self , context_ms = None ) :
"""Returns a point chosen by the interest model .
: param context _ ms : current context ( concatenated motor / sensory values ) ,
used when a context mode is configured ; ignored otherwise .
: return : the chosen point ; falls back to a uniformly random point within
bounds when the interest model is not bootstrapped yet .""" | try :
if self . context_mode is None :
x = self . interest_model . sample ( )
else :
# NOTE ( review ) : if context _ mode [ " mode " ] is neither ' mdmsds ' nor
# ' mcs ' , ` x ` is never assigned and the final return raises
# UnboundLocalError - - confirm the allowed modes upstream .
if self . context_mode [ "mode" ] == 'mdmsds' :
if self . expl_dims == self . conf . s_dims :
# Sample sensory dims given the sensory half of the context .
x = np . hstack ( ( context_ms [ self . conf . m_ndims // 2 : ] , self . interest_model . sample_given_context ( context_ms [ self . conf . m_ndims // 2 : ] , range ( self . conf . s_ndims // 2 ) ) ) )
else :
if self . context_mode [ 'choose_m' ] :
x = self . interest_model . sample ( )
else :
# Sample given the motor half of the context .
x = np . hstack ( ( context_ms [ : self . conf . m_ndims // 2 ] , self . interest_model . sample_given_context ( context_ms [ : self . conf . m_ndims // 2 ] , range ( self . conf . m_ndims // 2 ) ) ) )
elif self . context_mode [ "mode" ] == 'mcs' :
x = np . hstack ( ( context_ms , self . interest_model . sample_given_context ( context_ms , range ( self . context_mode [ "context_n_dims" ] ) ) ) )
except ExplautoBootstrapError :
# Model not ready yet : explore uniformly at random within bounds .
logger . warning ( 'Interest model not bootstrapped yet' )
x = rand_bounds ( self . conf . bounds [ : , self . expl_dims ] ) . flatten ( )
if self . context_mode is not None :
# Strip the context dimensions so only exploration dims are returned .
x = x [ list ( set ( self . expl_dims ) - set ( self . context_mode [ 'context_dims' ] ) ) ]
return x
def tune_learning_rate(self, h, parameter_list=None):
    """Naive tuning of the learning rate on the in-sample data.

    Parameters
    ----------
    h : int
        How many steps to run Aggregate on.
    parameter_list : list
        Candidate learning rates to try; a default grid is used when None.

    Returns
    -------
    None (sets self.learning_rate to the best candidate found).
    """
    if parameter_list is None:
        parameter_list = [0.001, 0.01, 0.1, 1.0, 10.0, 100.0,
                          1000.0, 10000.0, 100000.0]
    best_rate = None
    best_loss = None
    for candidate in parameter_list:
        self.learning_rate = candidate
        _, losses, _ = self.run(h, recalculate=False)
        current = losses[0]
        # The first grid value (re)initialises the running best.
        if candidate == parameter_list[0]:
            best_rate, best_loss = candidate, current
        elif current < best_loss:
            best_rate, best_loss = candidate, current
    self.learning_rate = best_rate
def server_call(method, server, timeout=DEFAULT_TIMEOUT, verify_ssl=True, **parameters):
    """Makes a call to an un-authenticated method on a server.

    :param method: The method name.
    :type method: str
    :param server: The MyGeotab server.
    :type server: str
    :param timeout: The timeout to make the call, in seconds. By default, this is 300 seconds (or 5 minutes).
    :type timeout: float
    :param verify_ssl: If True, verify the SSL certificate. It's recommended not to modify this.
    :type verify_ssl: bool
    :param parameters: Additional parameters to send (for example, search=dict(id='b123')).
    :raise MyGeotabException: Raises when an exception occurs on the MyGeotab server.
    :raise TimeoutException: Raises when the request does not respond after some time.
    :return: The result from the server.
    """
    # Guard-clause validation of the two required arguments.
    validations = (
        (method, "A method name must be specified"),
        (server, "A server (eg. my3.geotab.com) must be specified"),
    )
    for value, message in validations:
        if value is None:
            raise Exception(message)
    return _query(server, method, process_parameters(parameters),
                  timeout=timeout, verify_ssl=verify_ssl)
def to_bytes ( self ) :
'''Create bytes from properties .
: return : the packed message as a ` ` bytes ` ` object .
: raises NotImplementedError : when the security flag is set ( handling of
security data is not implemented ) .''' | # Verify that properties make sense
self . sanitize ( )
# Start with the 4 - bit message type
bitstream = BitArray ( 'uint:4=%d' % self . message_type )
# Add the three flag bits ( probe , enlra - enabled , security )
bitstream += BitArray ( 'bool=%d, bool=%d, bool=%d' % ( self . probe , self . enlra_enabled , self . security ) )
# Add reserved padding bits
bitstream += self . _reserved1
# Add the 8 - bit record count
bitstream += BitArray ( 'uint:8=%d' % len ( self . records ) )
# Add the nonce , left - padded with zero bits to 8 bytes when shorter .
# NOTE ( review ) : a nonce longer than 8 bytes is appended unchanged here - -
# confirm whether sanitize ( ) rejects over - long nonces .
nonce = bytes ( self . nonce )
if len ( nonce ) < 8 :
padding_len = 8 - len ( nonce )
bitstream += BitArray ( 8 * padding_len )
bitstream += BitArray ( bytes = nonce )
# Add the map - reply records
for record in self . records :
bitstream += record . to_bitstream ( )
# If the security flag is set then there should be security data here
# TODO : deal with security flag [ LISP - Security ]
if self . security :
raise NotImplementedError ( 'Handling security data is not ' + 'implemented yet' )
return bitstream . bytes
def ekopr(fname):
    """Open an existing E-kernel file for reading.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekopr_c.html

    :param fname: Name of EK file.
    :type fname: str
    :return: Handle attached to EK file.
    :rtype: int
    """
    fname_p = stypes.stringToCharP(fname)
    handle = ctypes.c_int(0)
    libspice.ekopr_c(fname_p, ctypes.byref(handle))
    return handle.value
def create_service_key(self, service_name, key_name):
    """Create a service key for the given service.

    An already-existing key with the same name is reused instead of
    being recreated.
    """
    if self.has_key(service_name, key_name):
        logging.warning("Reusing existing service key %s" % (key_name))
        return self.get_service_key(service_name, key_name)
    payload = {
        'service_instance_guid': self.get_instance_guid(service_name),
        'name': key_name,
    }
    return self.api.post('/v2/service_keys', payload)
def maxsize(self, size):
    """Resize the cache, evicting the oldest items if necessary."""
    if size < 0:
        raise ValueError('maxsize must be non-negative')
    self._lock.acquire()
    try:
        # Evict before publishing the new limit so the cache never
        # exceeds it.
        self._enforce_size_limit(size)
        self._maxsize = size
    finally:
        self._lock.release()
def is_maximal_matching(G, matching):
    """Determines whether the given set of edges is a maximal matching.

    A matching is a subset of edges in which no node occurs more than
    once. A maximal matching is one where no further edge of ``G`` can be
    added without violating the matching rule.

    Parameters
    ----------
    G : NetworkX graph
        The graph on which to check the maximal matching.
    matching : iterable
        An iterable of edges.

    Returns
    -------
    is_matching : bool
        True if the given edges are a maximal matching.

    Example
    -------
    >>> import dwave_networkx as dnx
    >>> G = dnx.chimera_graph(1, 1, 4)
    >>> dnx.is_maximal_matching(G, {(0, 4), (2, 7)})
    False
    >>> dnx.is_maximal_matching(G, {(0, 4), (1, 5), (2, 7), (3, 6)})
    True
    """
    matched_nodes = set()
    for edge in matching:
        matched_nodes.update(edge)
    # A valid matching touches exactly two distinct nodes per edge.
    if len(matched_nodes) != 2 * len(matching):
        return False
    # Maximal: every edge of G must have at least one matched endpoint.
    return all(u in matched_nodes or v in matched_nodes for u, v in G.edges)
def filter(self, func):
    """Select the values whose (key, value) pair satisfies ``func``.

    :param func: predicate applied to each (key, value) pair
    :type func: (K, T) -> bool
    :rtype: TList[T]
    """
    kept = []
    for key, value in self.items():
        if func(key, value):
            kept.append(value)
    return TList(kept)
def _compute(self, data):
    """Perform the calculation.

    Returns the local time series together with its time weights
    expressed in days.
    """
    local_ts = self._local_ts(*data)
    time_weights = local_ts[internal_names.TIME_WEIGHTS_STR]
    # Express the weights in float days so downstream arithmetic cannot
    # overflow the raw timedelta representation.
    time_weights = time_weights / np.timedelta64(1, 'D')
    return local_ts, time_weights
def _merge_pool_kwargs(self, override):
    """Merge a dictionary of override values for self.connection_pool_kw.

    This does not modify self.connection_pool_kw and returns a new dict.
    Any keys in the override dictionary with a value of ``None`` are
    removed from the merged dictionary.

    :param override: mapping of values to layer over the defaults, or a
        falsy value to just return a copy of the defaults.
    :return: the new merged dict.
    """
    base_pool_kwargs = self.connection_pool_kw.copy()
    if override:
        for key, value in override.items():
            if value is None:
                # ``None`` means "remove this key"; pop() with a default
                # replaces the try/del/except-KeyError dance.
                base_pool_kwargs.pop(key, None)
            else:
                base_pool_kwargs[key] = value
    return base_pool_kwargs
def parse_changes ( ) :
"""grab version from CHANGES and validate entry .
Only the first 1024 bytes of the file are scanned , so the newest entry
must appear at the top . Calls ` error ( ) ` - - which presumably aborts - -
when the underline length , the release date or the entry format is wrong .
: return : the version string of the first matching entry""" | with open ( 'CHANGES' ) as changes :
for match in re . finditer ( RE_CHANGES , changes . read ( 1024 ) , re . M ) :
# The underline must be exactly as long as the heading above it .
if len ( match . group ( 1 ) ) != len ( match . group ( 3 ) ) :
error ( 'incorrect underline in CHANGES' )
date = datetime . datetime . strptime ( match . group ( 4 ) , '%Y-%m-%d' ) . date ( )
if date != datetime . date . today ( ) :
error ( 'release date is not today' )
return match . group ( 2 )
error ( 'invalid release entry in CHANGES' )
def dimap ( D , I ) :
"""Function to map directions to x , y pairs in equal area projection .
Parameters
D : float ( or list / array ) of declinations
I : float ( or list / array ) of inclinations
Returns
XY : [ x , y ] values of the direction for the equal area projection ;
array input is delegated to dimap _ V .""" | try :
D = float ( D )
I = float ( I )
except TypeError : # is an array
return dimap_V ( D , I )
# DEFINE FUNCTION VARIABLES
# initialize equal area projection x , y
XY = [ 0. , 0. ]
# GET CARTESIAN COMPONENTS OF INPUT DIRECTION
X = dir2cart ( [ D , I , 1. ] )
# CHECK IF Z = 1 AND ABORT
# ( a straight - down direction plots at the origin )
if X [ 2 ] == 1.0 :
return XY
# return [ 0,0]
# TAKE THE ABSOLUTE VALUE OF Z
if X [ 2 ] < 0 : # this only works on lower hemisphere projections
X [ 2 ] = - X [ 2 ]
# CALCULATE THE X , Y COORDINATES FOR THE EQUAL AREA PROJECTION
# from Collinson 1983
# NOTE ( review ) : old _ div is the Python 2 legacy division wrapper . For a
# straight - up direction ( I = - 90 , so X [ 0 ] = X [ 1 ] = 0 and X [ 2 ] = = - 1 )
# the early - exit above is bypassed and this divides by zero - - confirm .
R = old_div ( np . sqrt ( 1. - X [ 2 ] ) , ( np . sqrt ( X [ 0 ] ** 2 + X [ 1 ] ** 2 ) ) )
XY [ 1 ] , XY [ 0 ] = X [ 0 ] * R , X [ 1 ] * R
# RETURN XY [ X , Y ]
return XY
def get_bool(self, key, default=None):
    """Look up *key* and coerce its configured value to a boolean.

    Args:
        key (str | unicode): Key to lookup
        default (bool | None): Default to use if key is not configured

    Returns:
        (bool | None): Value of key, if defined
    """
    raw = self.get_str(key)
    if raw is None:
        return default
    return to_boolean(raw)
def get_shared_nodes(G1: nx.DiGraph, G2: nx.DiGraph) -> List[str]:
    """Get all the nodes that are common to both networks."""
    # set.intersection accepts any iterable, so wrapping the second node
    # list in an extra set() was redundant work.
    return list(set(G1.nodes()).intersection(G2.nodes()))
def get_minimal_id(self):
    """Return the smallest tweet ID of the current response, minus one.

    :returns: minimal tweet identification number
    :raises: TwitterSearchException
    """
    if not self.__response:
        raise TwitterSearchException(1013)
    # Search responses nest the tweets under 'statuses'; other responses
    # carry the tweet list directly under 'content'.
    if self.__order_is_search:
        tweets = self.__response['content']['statuses']
    else:
        tweets = self.__response['content']
    oldest = min(tweets, key=lambda tweet: tweet['id'])
    return oldest['id'] - 1
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.