signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def _with_columns(self, columns):
    """Create a table from a sequence of columns, copying column labels."""
    result = type(self)()
    # Pair each existing label with the corresponding new column.
    for lbl, col in zip(self.labels, columns):
        self._add_column_and_format(result, lbl, col)
    return result
def cmd_alt(self, args):
    '''show altitude'''
    print("Altitude: %.1f" % self.status.altitude)
    qnh_pressure = self.get_mav_param('AFS_QNH_PRESSURE', None)
    # Only report QNH-derived altitude when a positive QNH pressure is set.
    if qnh_pressure is None or qnh_pressure <= 0:
        return
    ground_temp = self.get_mav_param('GND_TEMP', 21)
    pressure = self.master.field('SCALED_PRESSURE', 'press_abs', 0)
    qnh_alt = self.altitude_difference(qnh_pressure, pressure, ground_temp)
    print("QNH Alt: %u meters %u feet for QNH pressure %.1f"
          % (qnh_alt, qnh_alt * 3.2808, qnh_pressure))
    print("QNH Estimate: %.1f millibars" % self.qnh_estimate())
def add_space(self, line):
    """Add a Space object to the section.

    Used during initial parsing mainly.

    Args:
        line (str): one line that defines the space, maybe whitespaces
    """
    # Start a new Space item unless the last item already is one.
    if not isinstance(self.last_item, Space):
        self._structure.append(Space(self._structure))
    self.last_item.add_line(line)
    return self
def get_hdrs(flds_all, **kws):
    """Return headers, given user-specified keyword args."""
    # An explicit 'hdrs' list wins outright.
    explicit = kws.get('hdrs')
    if explicit is not None:
        return explicit
    # 'prt_flds' selects a subset of fields or a column order.
    if 'prt_flds' in kws:
        return kws['prt_flds']
    # Default: every field in the namedtuple is a header.
    return flds_all
def substr(self, name, start=None, size=None):
    """Return a substring of the string at key ``name``.

    ``start`` and ``size`` are 0-based integers specifying the portion
    of the string to return.  Like **Redis.SUBSTR**.

    :param string name: the key name
    :param int start: Optional, the offset of first byte returned.  If start
        is negative, the returned string will start at the start'th character
        from the end of string.
    :param int size: Optional, number of bytes returned.  If size is
        negative, then that many characters will be omitted from the end
        of string.
    :return: The extracted part of the string.
    :rtype: string

    >>> ssdb.set('str_test', 'abc12345678')
    True
    >>> ssdb.substr('str_test', 2, 4)
    'c123'
    >>> ssdb.substr('str_test', -2, 2)
    '78'
    >>> ssdb.substr('str_test', 1, -1)
    'bc1234567'
    """
    # Build the optional argument list; size is only honoured when start
    # is also supplied (mirrors the server command's arity).
    extra = []
    if start is not None:
        extra.append(get_integer('start', start))
        if size is not None:
            extra.append(get_integer('size', size))
    return self.execute_command('substr', name, *extra)
def ad_address(mode, hit_id):
    """Get the address of the ad on AWS.

    This is used at the end of the experiment to send participants
    back to AWS where they can complete and submit the HIT.

    :param mode: one of "debug", "sandbox" or "live"
    :param hit_id: the HIT id used to look up the ad via the psiTurk API
    :raises ValueError: if the psiTurk API is unreachable, returns a
        non-200 status, or ``mode`` is unknown
    """
    if mode == "debug":
        address = '/complete'
    elif mode in ["sandbox", "live"]:
        username = os.getenv('psiturk_access_key_id',
                             config.get("psiTurk Access", "psiturk_access_key_id"))
        password = os.getenv('psiturk_secret_access_id',
                             config.get("psiTurk Access", "psiturk_secret_access_id"))
        try:
            req = requests.get('https://api.psiturk.org/api/ad/lookup/' + hit_id,
                               auth=(username, password))
        except requests.exceptions.RequestException:
            # Narrowed from a bare except: only request failures mean the
            # API server could not be reached.
            raise ValueError('api_server_not_reachable')
        if req.status_code == 200:
            hit_address = req.json()['ad_id']
        else:
            # Was a placeholder message ("something here"); report the failure.
            raise ValueError(
                "psiTurk ad lookup failed with status {}".format(req.status_code))
        if mode == "sandbox":
            address = 'https://sandbox.ad.psiturk.org/complete/' + str(hit_address)
        else:
            address = 'https://ad.psiturk.org/complete/' + str(hit_address)
    else:
        raise ValueError("Unknown mode: {}".format(mode))
    return success_response(field="address", data=address,
                            request_type="ad_address")
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
    """Similar to smart_bytes, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    if isinstance(s, memoryview):
        s = bytes(s)
    if isinstance(s, bytes):
        if encoding == 'utf-8':
            # Already bytes in the requested encoding: return unchanged.
            return s
        else:
            # Re-encode: incoming bytes are assumed to be utf-8.
            return s.decode('utf-8', errors).encode(encoding, errors)
    if strings_only and (s is None or isinstance(s, int)):
        # Pass None and ints through untouched when requested.
        return s
    if not isinstance(s, six.string_types):
        try:
            if six.PY3:
                return six.text_type(s).encode(encoding)
            else:
                return bytes(s)
        except UnicodeEncodeError:
            if isinstance(s, Exception):
                # An Exception subclass containing non-ASCII data that doesn't
                # know how to print itself properly. We shouldn't raise a
                # further exception.
                return b' '.join([force_bytes(arg, encoding, strings_only,
                                              errors) for arg in s])
            return six.text_type(s).encode(encoding, errors)
    else:
        return s.encode(encoding, errors)
def from_robot(cls, robot, **kwargs):
    """Construct a Nearest Neighbor forward model from an existing dataset."""
    # Model dimensions come from the robot's motor and sensory feature lists.
    return cls(len(robot.m_feats), len(robot.s_feats), **kwargs)
def features(self):
    """list: Get the features stored in memory or in the GFF file"""
    # No feature file configured: serve the in-memory features.
    if not self.feature_file:
        return self._features
    log.debug('{}: reading features from feature file {}'.format(
        self.id, self.feature_path))
    with open(self.feature_path) as handle:
        feats = list(GFF.parse(handle))
    if len(feats) > 1:
        # Ambiguous file; warn and return nothing (implicit None before).
        log.warning('Too many sequences in GFF')
        return None
    return feats[0].features
def pfull_from_ps(bk, pk, ps, pfull_coord):
    """Compute pressure at full levels from surface pressure."""
    # Build half-level pressures first, then interpolate to full levels.
    phalf = phalf_from_ps(bk, pk, ps)
    return to_pfull_from_phalf(phalf, pfull_coord)
def attached_pane(self):
    """Return the attached :class:`Pane`.

    Returns
    -------
    :class:`Pane`
        The first pane whose ``pane_active`` flag equals ``'1'``; an empty
        list when none is active (odd return type, kept for compatibility).
    """
    for entry in self._panes:
        # pane_active is reported as a unicode '0'/'1' flag; a missing key
        # (get() -> None) never equals '1', matching the original check.
        if entry.get('pane_active') == '1':
            return Pane(window=self, **entry)
    return []
def _unquote_cookie(s: str) -> str:
    """Handle double quotes and escaping in cookie values.

    This method is copied verbatim from the Python 3.5 standard
    library (http.cookies._unquote) so we don't have to depend on
    non-public interfaces.
    """
    # If there aren't any doublequotes,
    # then there can't be any special characters.  See RFC 2109.
    if s is None or len(s) < 2:
        return s
    if s[0] != '"' or s[-1] != '"':
        return s
    # We have to assume that we must decode this string.
    # Down to work.
    # Remove the "s
    s = s[1:-1]
    # Check for special sequences.  Examples:
    #    \012 --> \n
    #    \"   --> "
    i = 0
    n = len(s)
    res = []
    while 0 <= i < n:
        o_match = _OctalPatt.search(s, i)
        q_match = _QuotePatt.search(s, i)
        if not o_match and not q_match:
            # Neither matched: the rest of the string is literal.
            res.append(s[i:])
            break
        # else:
        j = k = -1
        if o_match:
            j = o_match.start(0)
        if q_match:
            k = q_match.start(0)
        if q_match and (not o_match or k < j):
            # QuotePatt matched first: emit the character after the backslash.
            res.append(s[i:k])
            res.append(s[k + 1])
            i = k + 2
        else:
            # OctalPatt matched first: decode the three-digit octal escape.
            res.append(s[i:j])
            res.append(chr(int(s[j + 1:j + 4], 8)))
            i = j + 4
    return _nulljoin(res)
def template_exists_db(self, template):
    """Receives a template and checks if it exists in the database
    using the template name and language."""
    # Template tuple layout: index 0 is the name, index 3 the language.
    lookup_name = utils.camel_to_snake(template[0]).upper()
    lookup_language = utils.camel_to_snake(template[3])
    try:
        models.EmailTemplate.objects.get(name=lookup_name,
                                         language=lookup_language)
    except models.EmailTemplate.DoesNotExist:
        return False
    else:
        return True
def run_command(self, config_file):
    """:param str config_file: The name of config file."""
    parser = configparser.ConfigParser()
    parser.read(config_file)
    # Target RDBMS and the regex used to recognise constant labels.
    rdbms = parser.get('database', 'rdbms').lower()
    label_regex = parser.get('constants', 'label_regex')
    constants = self.create_constants(rdbms)
    constants.main(config_file, label_regex)
def _Backward3_T_Ps(P, s):
    """Backward equation for region 3, T=f(P,s)

    Parameters
    ----------
    P : float
        Pressure, [MPa]
    s : float
        Specific entropy, [kJ/kgK]

    Returns
    -------
    T : float
        Temperature, [K]
    """
    # The critical entropy splits region 3 into sub-regions 3a and 3b.
    sc = 4.41202148223476
    return _Backward3a_T_Ps(P, s) if s <= sc else _Backward3b_T_Ps(P, s)
def parse(cls, fptr, offset, length):
    """Parse XML box.

    Parameters
    ----------
    fptr : file
        Open file object.
    offset : int
        Start position of box in bytes.
    length : int
        Length of the box in bytes.

    Returns
    -------
    XMLBox
        Instance of the current XML box.
    """
    # Read from the current file position to the end of the box.
    num_bytes = offset + length - fptr.tell()
    read_buffer = fptr.read(num_bytes)
    if sys.hexversion < 0x03000000 and codecs.BOM_UTF8 in read_buffer:
        # Python3 with utf-8 handles this just fine.  Actually so does
        # Python2 right here since we decode using utf-8.  The real
        # problem comes when __str__ is used on the XML box, and that
        # is where Python2 falls short because of the ascii codec.
        msg = ('A BOM (byte order marker) was detected and '
               'removed from the XML contents in the box starting at byte '
               'offset {offset:d}.')
        msg = msg.format(offset=offset)
        warnings.warn(msg, UserWarning)
        read_buffer = read_buffer.replace(codecs.BOM_UTF8, b'')
    try:
        text = read_buffer.decode('utf-8')
    except UnicodeDecodeError as err:
        # Possibly bad string of bytes to begin with.
        # Try to search for <?xml and go from there.
        decl_start = read_buffer.find(b'<?xml')
        if decl_start <= -1:
            # Nope, that's not it.  All is lost.
            msg = ('A problem was encountered while parsing an XML box:'
                   '\n\n\t"{error}"\n\nNo XML was retrieved.')
            warnings.warn(msg.format(error=str(err)), UserWarning)
            return XMLBox(xml=None, length=length, offset=offset)
        text = read_buffer[decl_start:].decode('utf-8')
        # Let the user know that the XML box was problematic.
        msg = ('A UnicodeDecodeError was encountered parsing an XML box '
               'at byte position {offset:d} ({reason}), but the XML was '
               'still recovered.')
        msg = msg.format(offset=offset, reason=err.reason)
        warnings.warn(msg, UserWarning)
    # Strip out any trailing nulls, as they can foul up XML parsing.
    text = text.rstrip(chr(0))
    bfptr = io.BytesIO(text.encode('utf-8'))
    try:
        xml = ET.parse(bfptr)
    except ET.ParseError as err:
        # Malformed XML: warn and fall back to an empty box.
        msg = ('A problem was encountered while parsing an XML box:'
               '\n\n\t"{reason}"\n\nNo XML was retrieved.')
        msg = msg.format(reason=str(err))
        warnings.warn(msg, UserWarning)
        xml = None
    return cls(xml=xml, length=length, offset=offset)
def _is_non_string_iterable ( value ) :
"""Whether a value is iterable .""" | if isinstance ( value , str ) :
return False
if hasattr ( value , '__iter__' ) :
return True
if isinstance ( value , collections . abc . Sequence ) :
return True
return False |
def mean(self) -> Optional[float]:
    """Statistical mean of all values entered into histogram.

    This number is precise, because we keep the necessary data
    separate from bin contents.
    """
    if not self._stats:  # TODO: should be true always?
        return None
    total = self.total
    # Empty histogram has no defined mean -> NaN.
    return self._stats["sum"] / total if total > 0 else np.nan
def strip_prompt(self, a_string):
    """Strip 'Done' from command output."""
    output = super(NetscalerSSH, self).strip_prompt(a_string)
    lines = output.split(self.RESPONSE_RETURN)
    if "Done" not in lines[-1]:
        return output
    # Drop the trailing 'Done' status line.
    return self.RESPONSE_RETURN.join(lines[:-1])
def parse_inline_styles(self, data=None, import_type='string'):
    """Parse styles defined in the body of the document.

    This covers data inside HTML <style> tags, a URL, or a file to open.

    :param data: CSS text, a URL, or a file path (depends on import_type)
    :param import_type: one of 'string', 'url' or 'file'
    :raises ValueError: if no data is given or import_type is unknown
    """
    if data is None:
        # Was a bare `raise` outside any except block (a RuntimeError bug);
        # raise an explicit, descriptive exception instead.
        raise ValueError("parse_inline_styles requires data to parse")
    parser = cssutils.CSSParser()
    if import_type == 'string':
        sheet = parser.parseString(data, href=self.url_root)
    elif import_type == 'url':
        if data[:5].lower() == 'http:' or data[:6].lower() == 'https:':
            # Was a Python 2 print statement; converted to the function form.
            print("YES because it was: %s " % data[:5].lower())
        try:
            sheet = parser.parseUrl(data)
        except Exception:
            # Narrowed from a bare except; parse failures are logged and
            # skipped rather than aborting the crawl.
            sys.stderr.write("WARNING: Failed attempting to parse %s" % data)
            return
    elif import_type == 'file':
        sheet = parser.parseFile(data)
    else:
        raise ValueError("unknown import_type: %s" % import_type)
    for rule in sheet.cssRules:
        if rule.type == cssutils.css.CSSStyleRule.STYLE_RULE:
            # Record every selector defined by this style rule.
            self.defined_classes.extend(rule.selectorText.split(','))
        elif (self.follow_css_links
                and rule.type == cssutils.css.CSSStyleRule.IMPORT_RULE):
            href = rule.href
            sys.stderr.write("Added %s to the stylesheets to crawl" % href)
            if href[:5].lower() == 'http:' or href[:6].lower() == 'https:':
                self.linked_sheets.append(href)
            else:
                # Relative link: resolve against the url root and recurse.
                self.linked_sheets.append(self.url_root + href)
                self.parse_inline_styles(data=self.url_root + href,
                                         import_type='url')
        # Other rule types are deliberately ignored.
def pixel(self, coord_x, coord_y):
    # type: (int, int) -> Pixel
    """Returns the pixel value at a given position.

    :param int coord_x: The x coordinate.
    :param int coord_y: The y coordinate.
    :return tuple: The pixel value as (R, G, B).
    """
    try:
        row = self.pixels[coord_y]
        return row[coord_x]  # type: ignore
    except IndexError:
        raise ScreenShotError(
            "Pixel location ({}, {}) is out of range.".format(coord_x, coord_y))
def overlap(self, query, subject):
    """Accessory function to check if two ranges overlap."""
    # Two ranges overlap iff any endpoint of one lies within the other.
    checks = ((query[0], subject), (query[1], subject),
              (subject[0], query), (subject[1], query))
    return any(self.pt_within(pt, rng) for pt, rng in checks)
def merge(self, other, forceMerge=False):
    """Merge two reads by concatenating their sequence data and their
    quality data (<self> first, then <other>); <self> and <other> must have
    the same sequence name.  A new merged read object is returned; <self>
    and <other> are left unaltered.

    :param other: the other sequence to merge with self.
    :param forceMerge: force the merge to occur, even if sequence names
        don't match.  In this case, <self> takes precedence.
    :return: a new read of the same type as <self> representing the merge
        of <self> and <other>.
    :raise NGSReadError: if the sequence names do not match and the
        forceMerge parameter is not set.
    """
    if self.sequenceName != other.sequenceName and not forceMerge:
        raise NGSReadError("cannot merge " + self.sequenceName + " with " +
                           other.sequenceName + " -- different " +
                           "sequence names")
    name = self.sequenceName
    seq = self.sequenceData + other.sequenceData
    qual = self.sequenceQual + other.sequenceQual
    # BUG FIX: previously returned NGSReadError(name, seq, qual) -- an
    # exception object -- instead of constructing the merged read.
    return type(self)(name, seq, qual)
def mask_distance(image, voxelspacing=None, mask=slice(None)):
    r"""Computes the distance of each point under the mask to the mask border
    taking the voxel-spacing into account.

    Note that this feature is independent of the actual image content, but
    depends solely on the mask image.  Therefore always a one-dimensional
    feature is returned, even if a multi-spectral image has been supplied.
    If no mask has been supplied, the distances to the image borders are
    returned.

    Parameters
    ----------
    image : array_like or list/tuple of array_like
        A single image or a list/tuple of images (for the multi-spectral case).
    voxelspacing : sequence of floats
        The side-length of each voxel.
    mask : array_like
        A binary mask for the image.

    Returns
    -------
    mask_distance : ndarray
        Each voxel's distance to the mask borders.
    """
    # Multi-spectral input: the distance depends only on geometry, so the
    # first spectrum suffices (exact type check kept from the original).
    if type(image) in (tuple, list):
        image = image[0]
    return _extract_mask_distance(image, mask=mask, voxelspacing=voxelspacing)
def insert(self, schema, fields, **kwargs):
    """Persist fields into the db.

    schema -- Schema()
    fields -- dict -- the values to persist

    return -- int -- the primary key of the row just inserted
    """
    r = 0
    with self.connection(**kwargs) as connection:
        # Reuse this connection for both the transaction and the insert.
        kwargs['connection'] = connection
        try:
            with self.transaction(**kwargs):
                r = self._insert(schema, fields, **kwargs)
        except Exception as e:
            exc_info = sys.exc_info()
            # handle_error may fix the underlying problem; if it reports
            # success, retry the insert once.
            # NOTE(review): the retry runs outside the transaction context
            # manager above -- confirm that is intended.
            if self.handle_error(schema, e, **kwargs):
                r = self._insert(schema, fields, **kwargs)
            else:
                self.raise_error(e, exc_info)
    return r
def set_trafo_costs(network, args, cost110_220=7500, cost110_380=17333,
                    cost220_380=14166):
    """Set capital costs for extendable transformers in respect
    to PyPSA [EUR/MVA].

    Parameters
    ----------
    network : :class:`pypsa.Network`
        Overall container of PyPSA
    cost110_220 : capital costs for 110/220kV transformer
        default: 7500 EUR/MVA, source: costs for extra trafo in
        dena Verteilnetzstudie, p. 146; S of trafo used in osmTGmod
    cost110_380 : capital costs for 110/380kV transformer
        default: 17333 EUR/MVA, source: NEP 2025
    cost220_380 : capital costs for 220/380kV transformer
        default: 14166 EUR/MVA, source: NEP 2025
    """
    trafos = network.transformers
    # Annotate each transformer with the nominal voltage of both buses.
    trafos["v_nom0"] = trafos.bus0.map(network.buses.v_nom)
    trafos["v_nom1"] = trafos.bus1.map(network.buses.v_nom)
    # (low kV, high kV, base cost, branch-capacity-factor key)
    cost_table = (
        (110, 220, cost110_220, 'HV'),
        (110, 380, cost110_380, 'HV'),
        (220, 380, cost220_380, 'eHV'),
    )
    for v0, v1, cost, level in cost_table:
        sel = (trafos.v_nom0 == v0) & (trafos.v_nom1 == v1)
        trafos.loc[sel, 'capital_cost'] = (
            cost / args['branch_capacity_factor'][level])
    return network
def listFileParentsByLumi(self, block_name='', logical_file_name=None):
    """List file parents by lumi.

    required parameter: block_name
    returns: [{child_parent_id_list: [(cid1, pid1), (cid2, pid2), ... (cidn, pidn)]}]

    :param block_name: child block name (required)
    :param logical_file_name: optional list of LFNs to restrict the lookup
    """
    # Avoid the mutable default argument; None means "no LFN filter".
    if logical_file_name is None:
        logical_file_name = []
    if not block_name:
        dbsExceptionHandler(
            'dbsException-invalid-input',
            "Child block_name is required for fileparents/listFileParentsByLumi api",
            self.logger.exception)
    with self.dbi.connection() as conn:
        sqlresult = self.fileparentbylumi.execute(conn, block_name,
                                                  logical_file_name)
        return [{"child_parent_id_list": sqlresult}]
def set(self, data=None):
    """Sets the event.

    Stores ``data`` as the event payload, clears any previously recorded
    exception, and wakes up waiters via the underlying event object.
    """
    # NOTE(review): these double-underscore attributes are name-mangled to
    # the enclosing class (not visible in this chunk) -- confirm before
    # moving this method to another class.
    self.__data = data
    self.__exception = None
    self.__event.set()
def add_edge(self, source, target, interaction='-', directed=True,
             dataframe=True):
    """Add a single edge from source to target."""
    # Delegate to the batch API with a one-element list.
    edge = dict(source=source, target=target,
                interaction=interaction, directed=directed)
    return self.add_edges([edge], dataframe=dataframe)
async def process_feed(self, url, send_mentions=True):
    """Process a feed: fetch it, schedule its entries, follow archive
    links, and register WebSub notifications when updated.

    :param url: the feed URL to process
    :param send_mentions: forwarded to entry processing
    """
    self._feed_domains.add(utils.get_domain(url))
    # Each feed is processed at most once per run.
    if url in self._processed_feeds:
        LOGGER.debug("Skipping already processed feed %s", url)
        return
    self._processed_feeds.add(url)
    LOGGER.debug("++WAIT: %s: get feed", url)
    feed, previous, updated = await feeds.get_feed(self, url)
    LOGGER.debug("++DONE: %s: get feed", url)
    if updated:
        LOGGER.info("Feed %s has been updated", url)
    if not feed:
        return
    LOGGER.debug("--- starting process_feed %s %s", url, send_mentions)
    # (name, coroutine) pairs awaited together at the end.
    pending = []
    try:
        for link in feed.links:
            href = link['href']
            if not href:
                continue
            # RFC5005 archive links
            if self.args.archive and link.get('rel') in (
                    'prev-archive', 'next-archive', 'prev-page', 'next-page'):
                LOGGER.debug("Found archive link %s", link)
                pending.append(("process feed " + href,
                                self.process_feed(href, send_mentions)))
            # WebSub notification
            if updated and link.get('rel') == 'hub' and not feed.is_archive:
                LOGGER.debug("Found WebSub hub %s", link)
                pending.append(("update websub " + href,
                                feed.update_websub(self, href)))
    except (AttributeError, KeyError):
        LOGGER.debug("Feed %s has no links", url)
    # Schedule the entries; include entries from the previous version of
    # the feed so items that disappeared still get processed.
    items = set(feed.entry_links)
    if previous:
        items |= set(previous.entry_links)
    for entry in items:
        pending.append(("process entry " + entry,
                        self.process_entry(entry, send_mentions=send_mentions)))
    LOGGER.debug("--- finish process_feed %s %s", url, send_mentions)
    if pending:
        LOGGER.debug("+++WAIT: process_feed(%s): %d subtasks",
                     url, len(pending))
        LOGGER.debug("%s", [name for (name, _) in pending])
        await asyncio.wait([task for (_, task) in pending])
        LOGGER.debug("+++DONE: process_feed(%s): %d subtasks",
                     url, len(pending))
def stdev(self):
    """-> #float :func:numpy.std of the timing intervals"""
    if not len(self.array):
        # No samples recorded: no standard deviation to report.
        return None
    return round(np.std(self.array), self.precision)
def get(self, section_name, key_name,):
    """Replicate configparser.get() functionality.

    Args:
        section_name (str): section name in config
        key_name (str): key name in config.section_name

    Returns:
        str: do not check defaults, only return local value

    Raises:
        KeyError: unable to find option in either local or global config
    """
    # Local config takes precedence; fall back to global on any failure.
    try:
        return self.local_config.get(section_name, key_name)
    except Exception:
        self.logger.warning('%s.%s not found in local config',
                            section_name, key_name)
    try:
        return self.global_config.get(section_name, key_name)
    except Exception:
        self.logger.error('%s.%s not found in global config',
                          section_name, key_name)
        raise KeyError('Could not find option in local/global config')
def verify(password, encoded):
    """Verify a Password.

    :param password: plaintext candidate password
    :param encoded: stored encoded password string
    :return: True or False
    """
    algorithm, iterations, salt, _digest = split(encoded)
    candidate = encode(password, algorithm, salt, int(iterations))
    # Constant-time comparison to avoid timing side channels.
    return hmac.compare_digest(candidate.encode(), encoded.encode())
def validate_attr(self, append):
    """Validate that we have the same order as the existing & same dtype."""
    if not append:
        # Nothing to validate unless we are appending to an existing table.
        return
    existing_fields = getattr(self.attrs, self.kind_attr, None)
    if existing_fields is not None and existing_fields != list(self.values):
        raise ValueError("appended items do not match existing items"
                         " in table!")
    existing_dtype = getattr(self.attrs, self.dtype_attr, None)
    if existing_dtype is not None and existing_dtype != self.dtype:
        raise ValueError("appended items dtype do not match existing "
                         "items dtype in table!")
def chrome_setup_view(request):
    """Set up a browser-side GCM session.

    This *requires* a valid login session.  A "token" POST parameter is
    saved under the "gcm_token" parameter in the logged in user's
    NotificationConfig.
    """
    logger.debug(request.POST)
    token = None
    if request.method == "POST" and "token" in request.POST:
        token = request.POST.get("token")
    if not token:
        return HttpResponse('{"error":"Invalid data."}',
                            content_type="text/json")
    ncfg, _ = NotificationConfig.objects.get_or_create(user=request.user)
    ncfg.gcm_token = token
    ncfg.save()
    return HttpResponse('{"success":"Now registered."}',
                        content_type="text/json")
def step1(expnum, ccd, prefix='', version='p', sex_thresh=_SEX_THRESHOLD,
          wave_thresh=_WAVE_THRESHOLD, maxcount=_MAX_COUNT, dry_run=False):
    """Run the actual step1jmp / matt codes.

    expnum: the CFHT exposure to process
    ccd: which ccd in the mosaic to process
    sex_thresh: the detection threshold to run sExtractor at
    wave_thresh: the detection threshold for wavelet
    maxcount: saturation level
    dry_run: when True, skip copying result files back to storage
    """
    # Fetch the inputs: mopheader, the image itself, and its FWHM estimate.
    storage.get_file(expnum, ccd, prefix=prefix, version=version,
                     ext='mopheader')
    filename = storage.get_image(expnum, ccd, version=version, prefix=prefix)
    fwhm = storage.get_fwhm(expnum, ccd, prefix=prefix, version=version)
    basename = os.path.splitext(filename)[0]
    # Wavelet-based detection pass.
    logging.info(util.exec_prog(['step1jmp', '-f', basename,
                                 '-t', str(wave_thresh),
                                 '-w', str(fwhm),
                                 '-m', str(maxcount)]))
    obj_uri = storage.get_uri(expnum, ccd, version=version, ext='obj.jmp',
                              prefix=prefix)
    obj_filename = basename + ".obj.jmp"
    if not dry_run:
        storage.copy(obj_filename, obj_uri)
    ## for step1matt we need the weight image
    hdulist = fits.open(filename)
    flat_name = hdulist[0].header.get('FLAT', 'weight.fits')
    parts = os.path.splitext(flat_name)
    if parts[1] == '.fz':
        # Compressed flat: strip both the .fz and the inner extension.
        flat_name = os.path.splitext(parts[0])[0]
    else:
        flat_name = parts[0]
    try:
        flat_filename = storage.get_image(flat_name, ccd, version='',
                                          ext='fits', subdir='calibrators')
    except:
        # NOTE(review): bare except -- falls back to the old calibrator
        # area on *any* failure; consider narrowing the exception type.
        flat_filename = storage.get_image(flat_name, ccd, version='',
                                          ext='fits',
                                          subdir='old_calibrators')
    # (Re)create the weight.fits symlink pointing at the flat.
    if os.access('weight.fits', os.R_OK):
        os.unlink('weight.fits')
    if not os.access('weight.fits', os.R_OK):
        os.symlink(flat_filename, 'weight.fits')
    # sExtractor-based detection pass.
    logging.info(util.exec_prog(['step1matt', '-f', basename,
                                 '-t', str(sex_thresh),
                                 '-w', str(fwhm),
                                 '-m', str(maxcount)]))
    # Clean up the temporary symlink.
    if os.access('weight.fits', os.R_OK):
        os.unlink('weight.fits')
    obj_uri = storage.get_uri(expnum, ccd, version=version, ext='obj.matt',
                              prefix=prefix)
    obj_filename = basename + ".obj.matt"
    if not dry_run:
        storage.copy(obj_filename, obj_uri)
    return True
def get_corresponding_lineno(self, lineno):
    """Return the source line number of a line number in the
    generated bytecode as they are not in sync.
    """
    # debug_info maps (template_line, code_line) in ascending order; the
    # last entry at or before `lineno` wins, defaulting to line 1.
    return next((tmpl for tmpl, code in reversed(self.debug_info)
                 if code <= lineno), 1)
def get_all_tasks(self, course):
    """:return: a table containing taskid => Task pairs"""
    output = {}
    for taskid in self.get_readable_tasks(course):
        try:
            output[taskid] = self.get_task(course, taskid)
        except Exception:
            # Narrowed from a bare except (which also swallowed
            # KeyboardInterrupt/SystemExit); unloadable tasks are skipped
            # rather than aborting the whole listing.
            pass
    return output
def _prefix_from_prefix_string(cls, prefixlen_str):
    """Return prefix length from a numeric string.

    Args:
        prefixlen_str: The string to be converted

    Returns:
        An integer, the prefix length.

    Raises:
        NetmaskValueError: If the input is not a valid netmask
    """
    # int allows a leading +/- as well as surrounding whitespace,
    # so we ensure that isn't the case
    if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
        # presumably _report_invalid_netmask raises -- verify; otherwise
        # execution would fall through to int() below.
        cls._report_invalid_netmask(prefixlen_str)
    try:
        prefixlen = int(prefixlen_str)
    except ValueError:
        cls._report_invalid_netmask(prefixlen_str)
    # Reject lengths outside [0, max_prefixlen] (32 for v4, 128 for v6).
    if not (0 <= prefixlen <= cls._max_prefixlen):
        cls._report_invalid_netmask(prefixlen_str)
    return prefixlen
def _watch(self):
    """Start an asynchronous watch against this model.

    See :meth:`add_observer` to register an onchange callback.
    """
    async def _all_watcher():
        try:
            allwatcher = client.AllWatcherFacade.from_connection(
                self.connection())
            # Poll the controller for deltas until asked to stop.
            while not self._watch_stopping.is_set():
                try:
                    results = await utils.run_with_interrupt(
                        allwatcher.Next(),
                        self._watch_stopping,
                        loop=self._connector.loop)
                except JujuAPIError as e:
                    if 'watcher was stopped' not in str(e):
                        raise
                    if self._watch_stopping.is_set():
                        # this shouldn't ever actually happen, because
                        # the event should trigger before the controller
                        # has a chance to tell us the watcher is stopped
                        # but handle it gracefully, just in case
                        break
                    # controller stopped our watcher for some reason
                    # but we're not actually stopping, so just restart it
                    log.warning('Watcher: watcher stopped, restarting')
                    # Dropping the Id forces a fresh watcher on next Next().
                    del allwatcher.Id
                    continue
                except websockets.ConnectionClosed:
                    monitor = self.connection().monitor
                    if monitor.status == monitor.ERROR:
                        # closed unexpectedly, try to reopen
                        log.warning('Watcher: connection closed, reopening')
                        await self.connection().reconnect()
                        if monitor.status != monitor.CONNECTED:
                            # reconnect failed; abort and shutdown
                            log.error('Watcher: automatic reconnect '
                                      'failed; stopping watcher')
                            break
                        del allwatcher.Id
                        continue
                    else:
                        # closed on request, go ahead and shutdown
                        break
                if self._watch_stopping.is_set():
                    try:
                        await allwatcher.Stop()
                    except websockets.ConnectionClosed:
                        pass  # can't stop on a closed conn
                    break
                # Apply each delta to the local state and notify observers.
                for delta in results.deltas:
                    try:
                        delta = get_entity_delta(delta)
                        old_obj, new_obj = self.state.apply_delta(delta)
                        await self._notify_observers(delta, old_obj, new_obj)
                    except KeyError as e:
                        log.debug("unknown delta type: %s", e.args[0])
                # Signal that at least one batch of deltas has arrived.
                self._watch_received.set()
        except CancelledError:
            pass
        except Exception:
            log.exception('Error in watcher')
            raise
        finally:
            self._watch_stopped.set()

    log.debug('Starting watcher task')
    # Reset the lifecycle events before launching the background task.
    self._watch_received.clear()
    self._watch_stopping.clear()
    self._watch_stopped.clear()
    self._connector.loop.create_task(_all_watcher())
def replace(needle, with_=None, in_=None):
    """Replace occurrences of string(s) with other string(s) in (a) string(s).

    Unlike the built in :meth:`str.replace` method, this function provides
    clean API that clearly distinguishes the "needle" (string to replace),
    the replacement string, and the target string to perform replacement in
    (the "haystack").

    Additionally, a simultaneous replacement of several needles is possible.
    Note that this is different from performing multiple separate replacements
    one after another.

    Examples::

        replace('foo', with_='bar', in_=some_text)
        replace('foo', with_='bar').in_(other_text)
        replace('foo').with_('bar').in_(another_text)
        replace(['foo', 'bar']).with_('baz').in_(perhaps_a_long_text)
        replace({'foo': 'bar', 'baz': 'qud'}).in_(even_longer_text)

    :param needle: String to replace, iterable thereof,
                   or a mapping from needles to corresponding replacements
    :param with_: Replacement string, if ``needle`` was not a mapping
    :param in_: Optional string to perform replacement in

    :return: If all parameters were provided, result is the final string
             after performing a specified replacement.
             Otherwise, a :class:`Replacer` object is returned, allowing
             e.g. to perform the same replacements in many haystacks.
    """
    if needle is None:
        raise TypeError("replacement needle cannot be None")
    if not needle:
        raise ValueError("replacement needle cannot be empty")
    if is_string(needle):
        # A single needle is wrapped in a 1-tuple.
        replacer = Replacer((needle,))
    else:
        ensure_iterable(needle)
        if not is_mapping(needle):
            # NOTE(review): all(imap(...)) may consume a one-shot iterator
            # before the dict()/second all() below -- confirm callers pass
            # re-iterable needles.
            if all(imap(is_pair, needle)):
                # An iterable of (needle, replacement) pairs -> mapping.
                needle = dict(needle)
            elif not all(imap(is_string, needle)):
                raise TypeError("invalid replacement needle")
        replacer = Replacer(needle)
    if with_ is not None:
        ensure_string(with_)
        replacer = replacer.with_(with_)
    if in_ is not None:
        ensure_string(in_)
        # Both pieces supplied: perform the replacement immediately.
        return replacer.in_(in_)
    return replacer
def do_connect(self, arg):
    """Connect to the arm.

    Prints a styled error if the arm is already connected or if the
    connection attempt raises an ``ArmException``; otherwise reports
    the serial port that was opened.
    """
    if self.arm.is_connected():
        print(self.style.error('Error: ', 'Arm is already connected.'))
        return
    try:
        opened_port = self.arm.connect()
    except r12.ArmException as exc:
        print(self.style.error('Error: ', str(exc)))
    else:
        print(self.style.success('Success: ', 'Connected to \'{}\'.'.format(opened_port)))
def _extract_readnum ( read_dict ) :
"""Extract read numbers from old - style fastqs .
Handles read 1 and 2 specifications where naming is
readname / 1 readname / 2""" | pat = re . compile ( r"(?P<readnum>/\d+)$" )
parts = pat . split ( read_dict [ "name" ] )
if len ( parts ) == 3 :
name , readnum , endofline = parts
read_dict [ "name" ] = name
read_dict [ "readnum" ] = readnum
else :
read_dict [ "readnum" ] = ""
return read_dict |
def set_device_offset(self, x_offset, y_offset):
    """Set an offset that is added to the device coordinates
    (as determined by the CTM) when drawing to this surface.

    One use case is redirecting drawing for a portion of an onscreen
    surface to an offscreen surface in a way that is completely
    invisible to the user of the cairo API: unlike a transformation
    set via :meth:`Context.translate`, this offset is not exposed by
    methods such as :meth:`Context.device_to_user`.

    Note that the offset affects drawing to the surface as well as
    using the surface in a source pattern.

    :param x_offset: the offset in the X direction, in device units
    :param y_offset: the offset in the Y direction, in device units
    """
    cairo.cairo_surface_set_device_offset(self._pointer, x_offset, y_offset)
    # Raise the corresponding Python exception if cairo reported an
    # error status for this surface.
    self._check_status()
def close_authenticator(self):
    """Shut down the CBS authentication channel and its session.

    The final log line runs in a ``finally`` block so shutdown
    completion is logged even if destroying the CBS link raises.
    """
    container_id = self._connection.container_id
    _logger.info("Shutting down CBS session on connection: %r.", container_id)
    try:
        _logger.debug("Unlocked CBS to close on connection: %r.", container_id)
        self._cbs_auth.destroy()
        _logger.info("Auth closed, destroying session on connection: %r.", container_id)
        self._session.destroy()
    finally:
        _logger.info("Finished shutting down CBS session on connection: %r.", container_id)
def setup_datafind_from_pregenerated_lcf_files(cp, ifos, outputDir, tags=None):
    """Set up datafind when running with pregenerated LCF frame-cache files.

    Parameters
    ----------
    cp : ConfigParser.ConfigParser instance
        Representation of the information stored within the workflow
        configuration files.
    ifos : list of str
        IFOs to get pregenerated files for.
    outputDir : path
        Directory for datafind output files; currently this sub-module
        writes no output.
    tags : list of str, optional (default=None)
        Tags used to pick call-specific configuration (options in
        ``[workflow-datafind-${TAG}]`` rather than ``[workflow-datafind]``)
        and to uniqueify the returned Files and their filenames.

    Returns
    -------
    datafindcaches : list of glue.lal.Cache instances
        Cache representations of the pregenerated frame-cache files.
    datafindouts : pycbc.workflow.core.FileList
        All datafind output files for use later in the pipeline.
    """
    if tags is None:
        tags = []
    datafindcaches = []
    for ifo in ifos:
        opt_name = "datafind-pregenerated-cache-file-%s" % ifo.lower()
        cache_path = cp.get_opt_tags("workflow-datafind", opt_name, tags=tags)
        ifo_cache = lal.Cache.fromfilenames([cache_path], coltype=lal.LIGOTimeGPS)
        ifo_cache.ifo = ifo
        datafindcaches.append(ifo_cache)
    datafindouts = convert_cachelist_to_filelist(datafindcaches)
    return datafindcaches, datafindouts
def transformWith(self, func, other, keepSerializer=False):
    """Return a new DStream whose RDDs are generated by applying ``func``
    to each pair of RDDs from this DStream and ``other``.

    ``func`` may take two arguments ``(rdd_a, rdd_b)`` or three
    arguments ``(time, rdd_a, rdd_b)``.
    """
    # Normalize a two-argument function to the (time, rdd_a, rdd_b) form.
    if func.__code__.co_argcount == 2:
        pairwise = func
        func = lambda t, a, b: pairwise(a, b)
    assert func.__code__.co_argcount == 3, "func should take two or three arguments"
    jfunc = TransformFunction(self._sc, func,
                              self._jrdd_deserializer, other._jrdd_deserializer)
    dstream = self._sc._jvm.PythonTransformed2DStream(
        self._jdstream.dstream(), other._jdstream.dstream(), jfunc)
    serializer = self._jrdd_deserializer if keepSerializer else self._sc.serializer
    return DStream(dstream.asJavaDStream(), self._ssc, serializer)
def overall_statistics(RACC, RACCU, TPR, PPV, TP, FN, FP, POP, P, TOP, jaccard_list, CEN_dict, MCEN_dict, AUC_dict, classes, table):
    """Return overall (whole-matrix) statistics.

    :param RACC: random accuracy
    :type RACC: dict
    :param RACCU: unbiased random accuracy
    :type RACCU: dict
    :param TPR: sensitivity, recall, hit rate, or true positive rate
    :type TPR: dict
    :param PPV: precision or positive predictive value
    :type PPV: dict
    :param TP: true positive
    :type TP: dict
    :param FN: false negative
    :type FN: dict
    :param FP: false positive
    :type FP: dict
    :param POP: population
    :type POP: dict
    :param P: condition positive
    :type P: dict
    :param TOP: test outcome positive
    :type TOP: dict
    :param jaccard_list: Jaccard index for each class (must support .values())
    :type jaccard_list: dict
    :param CEN_dict: CEN for each class
    :type CEN_dict: dict
    :param MCEN_dict: MCEN for each class
    :type MCEN_dict: dict
    :param AUC_dict: AUC for each class
    :type AUC_dict: dict
    :param classes: confusion matrix classes
    :type classes: list
    :param table: input matrix
    :type table: dict
    :return: overall statistics as dict
    """
    # All classes share the same population; take it from any entry.
    population = list(POP.values())[0]
    # Accuracy / agreement family (kappa and its variants).
    overall_accuracy = overall_accuracy_calc(TP, population)
    overall_random_accuracy_unbiased = overall_random_accuracy_calc(RACCU)
    overall_random_accuracy = overall_random_accuracy_calc(RACC)
    overall_kappa = reliability_calc(overall_random_accuracy, overall_accuracy)
    PC_PI = PC_PI_calc(P, TOP, POP)
    PC_AC1 = PC_AC1_calc(P, TOP, POP)
    PC_S = PC_S_calc(classes)
    PI = reliability_calc(PC_PI, overall_accuracy)
    AC1 = reliability_calc(PC_AC1, overall_accuracy)
    S = reliability_calc(PC_S, overall_accuracy)
    kappa_SE = kappa_se_calc(overall_accuracy, overall_random_accuracy, population)
    kappa_unbiased = reliability_calc(overall_random_accuracy_unbiased, overall_accuracy)
    kappa_no_prevalence = kappa_no_prevalence_calc(overall_accuracy)
    kappa_CI = CI_calc(overall_kappa, kappa_SE)
    overall_accuracy_se = se_calc(overall_accuracy, population)
    overall_accuracy_CI = CI_calc(overall_accuracy, overall_accuracy_se)
    # Association / correlation family.
    chi_squared = chi_square_calc(classes, table, TOP, P, POP)
    phi_squared = phi_square_calc(chi_squared, population)
    cramer_V = cramers_V_calc(phi_squared, classes)
    # Information-theoretic family.
    response_entropy = entropy_calc(TOP, POP)
    reference_entropy = entropy_calc(P, POP)
    cross_entropy = cross_entropy_calc(TOP, P, POP)
    join_entropy = joint_entropy_calc(classes, table, POP)
    conditional_entropy = conditional_entropy_calc(classes, table, P, POP)
    mutual_information = mutual_information_calc(response_entropy, conditional_entropy)
    kl_divergence = kl_divergence_calc(P, TOP, POP)
    lambda_B = lambda_B_calc(classes, table, TOP, population)
    lambda_A = lambda_A_calc(classes, table, P, population)
    DF = DF_calc(classes)
    # Loss / summary metrics.
    overall_jaccard_index = overall_jaccard_index_calc(list(jaccard_list.values()))
    hamming_loss = hamming_calc(TP, population)
    zero_one_loss = zero_one_loss_calc(TP, population)
    NIR = NIR_calc(P, population)
    p_value = p_value_calc(TP, population, NIR)
    overall_CEN = overall_CEN_calc(classes, TP, TOP, P, CEN_dict)
    overall_MCEN = overall_CEN_calc(classes, TP, TOP, P, MCEN_dict, True)
    overall_MCC = overall_MCC_calc(classes, table, TOP, P)
    RR = RR_calc(classes, TOP)
    CBA = CBA_calc(classes, table, TOP, P)
    AUNU = macro_calc(AUC_dict)
    AUNP = AUNP_calc(classes, P, POP, AUC_dict)
    RCI = RCI_calc(mutual_information, reference_entropy)
    C = pearson_C_calc(chi_squared, population)
    return {
        "Overall ACC": overall_accuracy,
        "Kappa": overall_kappa,
        "Overall RACC": overall_random_accuracy,
        "SOA1(Landis & Koch)": kappa_analysis_koch(overall_kappa),
        "SOA2(Fleiss)": kappa_analysis_fleiss(overall_kappa),
        "SOA3(Altman)": kappa_analysis_altman(overall_kappa),
        "SOA4(Cicchetti)": kappa_analysis_cicchetti(overall_kappa),
        "TPR Macro": macro_calc(TPR),
        "PPV Macro": macro_calc(PPV),
        "TPR Micro": micro_calc(TP=TP, item=FN),
        "PPV Micro": micro_calc(TP=TP, item=FP),
        "Scott PI": PI,
        "Gwet AC1": AC1,
        "Bennett S": S,
        "Kappa Standard Error": kappa_SE,
        "Kappa 95% CI": kappa_CI,
        "Chi-Squared": chi_squared,
        "Phi-Squared": phi_squared,
        "Cramer V": cramer_V,
        "Chi-Squared DF": DF,
        "95% CI": overall_accuracy_CI,
        "Standard Error": overall_accuracy_se,
        "Response Entropy": response_entropy,
        "Reference Entropy": reference_entropy,
        "Cross Entropy": cross_entropy,
        "Joint Entropy": join_entropy,
        "Conditional Entropy": conditional_entropy,
        "KL Divergence": kl_divergence,
        "Lambda B": lambda_B,
        "Lambda A": lambda_A,
        "Kappa Unbiased": kappa_unbiased,
        "Overall RACCU": overall_random_accuracy_unbiased,
        "Kappa No Prevalence": kappa_no_prevalence,
        "Mutual Information": mutual_information,
        "Overall J": overall_jaccard_index,
        "Hamming Loss": hamming_loss,
        "Zero-one Loss": zero_one_loss,
        "NIR": NIR,
        "P-Value": p_value,
        "Overall CEN": overall_CEN,
        "Overall MCEN": overall_MCEN,
        "Overall MCC": overall_MCC,
        "RR": RR,
        "CBA": CBA,
        "AUNU": AUNU,
        "AUNP": AUNP,
        "RCI": RCI,
        "Pearson C": C,
    }
def arguments(function, extra_arguments=0):
    """Return the names of all arguments a function takes.

    Objects without a ``__code__`` attribute (builtins, partials, ...)
    yield an empty tuple. ``extra_arguments`` extends the slice past the
    declared argument count into the following local-variable names.
    """
    code = getattr(function, '__code__', None)
    if code is None:
        return ()
    return code.co_varnames[:code.co_argcount + extra_arguments]
def rm_pawn(self, name, *args):
    """Remove the :class:`Pawn` with the given ``name``.

    :raises KeyError: if no pawn by that name exists
    """
    if name not in self.pawn:
        raise KeyError("No Pawn named {}".format(name))
    # Currently there's no way to connect Pawns with Arrows, but there
    # probably will be, so clean up any arrows just in case.
    self.rm_arrows_to_and_from(name)
    doomed = self.pawn.pop(name)
    if doomed in self.selection_candidates:
        self.selection_candidates.remove(doomed)
    doomed.parent.remove_widget(doomed)
def get_parse(self, show=True, proxy=None, timeout=0):
    """GET MediaWiki:API action=parse request.

    https://en.wikipedia.org/w/api.php?action=help&modules=parse

    Required {params}: title OR pageid
    - title: <str> article title
    - pageid: <int> Wikipedia database ID

    Optional arguments:
    - [show]: <bool> echo page data if true
    - [proxy]: <str> use this HTTP proxy
    - [timeout]: <int> timeout in seconds (0=wait forever)

    Data captured: image, infobox, iwlinks, pageid, parsetree,
    requests, wikibase, wikitext.

    :raises ValueError: if neither title nor pageid is set
    """
    params = self.params
    if not (params.get('title') or params.get('pageid')):
        raise ValueError("get_parse needs title or pageid")
    self._get('parse', show, proxy, timeout)
    return self
def create(self, argv):
    """Create a search job.

    Expects exactly one positional argument in ``argv`` (the search
    expression); all remaining parsed keyword options are forwarded to
    ``jobs.create``. Prints the new job's sid on success.
    """
    opts = cmdline(argv, FLAGS_CREATE)
    if len(opts.args) != 1:
        error("Command requires a search expression", 2)
    # Fix: the original assigned `query` but then re-indexed opts.args[0]
    # in the create() call, leaving the variable unused.
    query = opts.args[0]
    job = self.service.jobs.create(query, **opts.kwargs)
    print(job.sid)
def build(self):
    """Assemble the decoder computational graph.

    Components:
      (1) the input node ``decoder_input``
      (2) the embedding node ``decoder_embed``
      (3) the recurrent (RNN) part ``decoder_rnn``
      (4) the decoder RNN output ``decoder_output``
      (5) the classification output layer ``decoder_dense``
    """
    # Grab hyperparameters from self.config:
    hidden_dim = self.config['encoding-layer-width']
    recurrent_unit = self.config['recurrent-unit-type']
    bidirectional = False
    # self.config['encoding-layer-bidirectional']
    vocab_size = self.data.properties.vocab_size
    # Embedding width grows logarithmically with vocabulary size.
    embedding_dim = math.ceil(math.log(vocab_size, 2))
    # self.config['embedding-dim']
    input_length = self.data.properties['max-utterance-length'] + 1
    # Assemble the network components:
    decoder_input = Input(shape=(None,))
    decoder_embed = Embedding(vocab_size, embedding_dim, mask_zero=True)(decoder_input)
    # , input_length=input_length)(decoder_input)
    if recurrent_unit == 'lstm':
        decoder_rnn = LSTM(hidden_dim, return_sequences=True, return_state=True)
        # LSTM returns (outputs, hidden state, cell state); the decoder is
        # seeded with the encoder's final hidden state.
        decoder_output, decoder_h, decoder_c = decoder_rnn(decoder_embed, initial_state=self.encoder.encoder_hidden_state)
    elif recurrent_unit == 'gru':
        decoder_rnn = GRU(hidden_dim, return_sequences=True, return_state=True)
        decoder_output, _ = decoder_rnn(decoder_embed, initial_state=self.encoder.encoder_hidden_state)
    else:
        raise Exception('Invalid recurrent unit type: {}' .format(recurrent_unit))
    # make the RNN component bidirectional, if desired
    # NOTE(review): this wraps decoder_rnn AFTER it was already called above,
    # so decoder_output is unaffected; dead in practice since `bidirectional`
    # is hard-coded to False — confirm intent before enabling.
    if bidirectional:
        decoder_rnn = Bidirectional(decoder_rnn, merge_mode='ave')
    decoder_dense = Dense(vocab_size, activation='softmax')
    decoder_output = decoder_dense(decoder_output)
    # save the Decoder components as class state
    self.decoder_input = decoder_input
    self.decoder_embed = decoder_embed
    self.decoder_rnn = decoder_rnn
    self.decoder_dense = decoder_dense
    self.decoder_output = decoder_output
    return
def wrap(self, req, result):
    """Wrap method return results.

    The return value of the action method and of the action extensions
    is passed through this method before being returned to the caller.
    Instances of `webob.Response` are thrown, to abort the rest of
    action and extension processing; otherwise, objects which are not
    instances of ResponseObject will be wrapped in one.
    """
    # Order matters: HTTPException is checked before the plain Response case.
    if isinstance(result, webob.exc.HTTPException):
        # A webob HTTP exception; use raise to bail out immediately and
        # pass it upstream.
        raise result
    if isinstance(result, webob.Response):
        # Straight-up webob Response object; raise AppathyResponse to bail out.
        raise exceptions.AppathyResponse(result)
    if isinstance(result, response.ResponseObject):
        # Already a ResponseObject; bind it to this descriptor.
        result._bind(self)
        return result
    # Create a new, bound, ResponseObject.
    return self.resp_type(req, result, _descriptor=self)
def app_main(self, experiment=None, last=False, new=False, verbose=False, verbosity_level=None, no_modification=False, match=False):
    """The main function for parsing global arguments.

    Parameters
    ----------
    experiment : str
        The id of the experiment to use
    last : bool
        If True, the last experiment is used
    new : bool
        If True, a new experiment is created
    verbose : bool
        Increase the verbosity level to DEBUG. See also `verbosity_level`
        for a more specific determination of the verbosity
    verbosity_level : str or int
        The verbosity level to use. Either one of ``'DEBUG', 'INFO',
        'WARNING', 'ERROR'`` or the corresponding integer (see pythons
        logging module)
    no_modification : bool
        If True/set, no modifications in the configuration files will be
        done
    match : bool
        If True/set, interpret `experiment` as a regular expression
        (regex) and use the matching experiment
    """
    # Resolve `experiment` via regex against the known experiment ids.
    if match:
        patt = re.compile(experiment)
        matches = list(filter(patt.search, self.config.experiments))
        if len(matches) > 1:
            raise ValueError("Found multiple matches for %s: %s" % (experiment, matches))
        elif len(matches) == 0:
            raise ValueError("No experiment matches %s" % experiment)
        experiment = matches[0]
    if last and self.config.experiments:
        # NOTE(review): assigning None to the `experiment` property presumably
        # triggers its "use the last experiment" logic — confirm against the
        # property definition.
        self.experiment = None
    elif new and self.config.experiments:
        try:
            self.experiment = utils.get_next_name(self.experiment)
        except ValueError:
            raise ValueError("Could not estimate an experiment id! Please use the " "experiment argument to provide an id.")
    else:
        # Bypass the property and set the backing attribute directly.
        self._experiment = experiment
    # `verbose` wins over `verbosity_level`; both end up as a logging level.
    if verbose:
        verbose = logging.DEBUG
    elif verbosity_level:
        if verbosity_level in ['DEBUG', 'INFO', 'WARNING', 'ERROR']:
            verbose = getattr(logging, verbosity_level)
        else:
            verbose = int(verbosity_level)
    if verbose:
        logging.getLogger(utils.get_toplevel_module(inspect.getmodule(self))).setLevel(verbose)
        self.logger.setLevel(verbose)
    self.no_modification = no_modification
def setattr(self, name, val):
    """Change an attribute value of the UI element.

    Not all attributes can be set; attempting to change an immutable or
    nonexistent attribute raises InvalidOperationException.

    Args:
        name: attribute name
        val: new attribute value to set

    Raises:
        InvalidOperationException: when it fails to set the attribute
            on the UI element
    """
    node = self._do_query(multiple=False)
    try:
        return self.poco.agent.hierarchy.setAttr(node, name, val)
    except UnableToSetAttributeException as exc:
        raise InvalidOperationException('"{}" of "{}"'.format(str(exc), self))
def _set_redistribute_ospf(self, v, load=False):
    """Setter method for redistribute_ospf, mapped from YANG variable
    /rbridge_id/ipv6/router/ospf/redistribute/redistribute_ospf (container).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_redistribute_ospf is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_redistribute_ospf() directly.

    YANG Description: OSPF routes
    """
    # Unwrap values that carry a union-type constructor.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/coerce the value into the generated container type.
        t = YANGDynClass(v, base=redistribute_ospf.redistribute_ospf, is_container='container', presence=True, yang_name="redistribute-ospf", rest_name="ospf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'OSPF routes', u'alt-name': u'ospf'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """redistribute_ospf must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=redistribute_ospf.redistribute_ospf, is_container='container', presence=True, yang_name="redistribute-ospf", rest_name="ospf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'OSPF routes', u'alt-name': u'ospf'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)""", })
    self.__redistribute_ospf = t
    # Notify the parent object (if any) that the configuration changed.
    if hasattr(self, '_set'):
        self._set()
def push_pos(self, *args):
    """Record my current position in the database.

    Stores the position, expressed as proportions of the board's width
    and height, under the ``_x`` and ``_y`` keys of the entity in my
    ``proxy`` property.
    """
    board = self.board
    self.proxy['_x'] = self.x / board.width
    self.proxy['_y'] = self.y / board.height
def AssertIterableType(iterable, expected_item_type):
    """Ensures that given iterable container has certain type.

    Args:
      iterable: An iterable container to assert the type for.
      expected_item_type: An expected type of the container items.

    Raises:
      TypeError: If given container is not an iterable or its items do not
        have the expected type.
    """
    # Fix: `collections.Iterator`/`collections.Iterable` were deprecated
    # aliases and were removed from the `collections` namespace in
    # Python 3.10; use `collections.abc` instead.
    from collections import abc
    # We do not consider iterators to be iterables even though Python does. An
    # "iterable" should be a type that can be iterated (that is: an iterator can
    # be constructed for them). Iterators should not be considered to be iterable
    # because it makes no sense to construct an iterator for iterator. The most
    # important practical implication is that act of iterating an iterator drains
    # it whereas act of iterating the iterable does not.
    if isinstance(iterable, abc.Iterator):
        message = "Expected iterable container but got iterator `%s` instead"
        message %= iterable
        raise TypeError(message)
    AssertType(iterable, abc.Iterable)
    for item in iterable:
        AssertType(item, expected_item_type)
def __f2d ( frac_coords , v ) :
"""Converts fractional coordinates to discrete coordinates with respect to
the grid size of v""" | # frac _ coords = frac _ coords % 1
return np . array ( [ int ( frac_coords [ 0 ] * v . shape [ 0 ] ) , int ( frac_coords [ 1 ] * v . shape [ 1 ] ) , int ( frac_coords [ 2 ] * v . shape [ 2 ] ) ] ) |
def register_view(self, view):
    """Called when the View was registered.

    Can be used e.g. to connect signals. Wires the add/remove buttons to
    their controllers, disables editing for library states, and shows or
    hides the tabs appropriate to this state's type.
    """
    super(StateEditorController, self).register_view(view)
    view.prepare_the_labels()
    # the preparation of the labels is done here to take into account plugin hook changes
    view['add_input_port_button'].connect('clicked', self.inputs_ctrl.on_add)
    view['add_output_port_button'].connect('clicked', self.outputs_ctrl.on_add)
    if isinstance(self.model, ContainerStateModel):
        view['add_scoped_variable_button'].connect('clicked', self.scopes_ctrl.on_add)
    view['remove_input_port_button'].connect('clicked', self.inputs_ctrl.on_remove)
    view['remove_output_port_button'].connect('clicked', self.outputs_ctrl.on_remove)
    if isinstance(self.model, ContainerStateModel):
        view['remove_scoped_variable_button'].connect('clicked', self.scopes_ctrl.on_remove)
    # Library states (and states inside a library) are read-only: disable
    # all port-editing buttons.
    if isinstance(self.model, LibraryStateModel) or self.model.state.get_next_upper_library_root_state():
        view['add_input_port_button'].set_sensitive(False)
        view['remove_input_port_button'].set_sensitive(False)
        view['add_output_port_button'].set_sensitive(False)
        view['remove_output_port_button'].set_sensitive(False)
        view['add_scoped_variable_button'].set_sensitive(False)
        view['remove_scoped_variable_button'].set_sensitive(False)
    view.inputs_view.show()
    view.outputs_view.show()
    view.scopes_view.show()
    view.outcomes_view.show()
    view.transitions_view.show()
    view.data_flows_view.show()
    # show scoped variables if show content is enabled -> if disabled the tab stays and indicates a container state
    if isinstance(self.model, LibraryStateModel) and not self.model.show_content():
        view.scopes_view.hide()
        view.linkage_overview.scope_view.hide()
    # Container states do not have a source editor and library states do not show their source code.
    # Thus, for those states we do not have to add the source controller and can hide the source code tab.
    # logger.info("init state: {0}".format(model))
    lib_with_and_ES_as_root = isinstance(self.model, LibraryStateModel) and not isinstance(self.model.state_copy, ContainerStateModel)
    # NOTE(review): precedence is (A and B) or lib_with_and_ES_as_root — confirm intended.
    if not isinstance(self.model, ContainerStateModel) and not isinstance(self.model, LibraryStateModel) or lib_with_and_ES_as_root:
        view.source_view.show()
        if isinstance(self.model, LibraryStateModel) and not self.model.show_content():
            view.remove_source_tab()
        view.remove_scoped_variables_tab()
    else:
        view.scopes_view.show()
        if isinstance(self.model, LibraryStateModel) and (not self.model.show_content() or not isinstance(self.model.state_copy, ContainerStateModel)):
            view.remove_scoped_variables_tab()
            view.remove_source_tab()
    if global_gui_config.get_config_value("SEMANTIC_DATA_MODE", False):
        view.bring_tab_to_the_top('Semantic Data')
    else:
        if isinstance(self.model.state, LibraryState):
            view.bring_tab_to_the_top('Description')
        else:
            view.bring_tab_to_the_top('Linkage Overview')
    if isinstance(self.model, ContainerStateModel):
        self.scopes_ctrl.reload_scoped_variables_list_store()
    plugins.run_hook("post_state_editor_register_view", self)
def sinkhorn_lpl1_mm(a, labels_a, b, M, reg, eta=0.1, numItermax=10, numInnerItermax=200, stopInnerThr=1e-9, verbose=False, log=False, to_numpy=True):
    r"""Solve entropic-regularized OT with non-convex group-lasso
    regularization on GPU.

    If the input matrices are in numpy format, they will be uploaded to
    the GPU first, which can incur significant time overhead.

    Solves

    .. math::
        \gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega_e(\gamma)
                 + \eta \Omega_g(\gamma)

        s.t. \gamma 1 = a,\ \gamma^T 1 = b,\ \gamma \geq 0

    where :math:`\Omega_e` is the entropic term and :math:`\Omega_g` is
    the group-lasso term over the source-class index groups, solved by
    the generalised conditional gradient (majoration-minimization) as in
    [5]_ [7]_.

    Parameters
    ----------
    a : np.ndarray (ns,)
        Sample weights in the source domain.
    labels_a : np.ndarray (ns,)
        Labels of samples in the source domain.
    b : np.ndarray (nt,)
        Sample weights in the target domain.
    M : np.ndarray (ns, nt)
        Loss matrix.
    reg : float
        Entropic regularization strength (> 0).
    eta : float, optional
        Group-lasso regularization strength (> 0).
    numItermax : int, optional
        Max number of outer (majoration) iterations.
    numInnerItermax : int, optional
        Max number of iterations of the inner sinkhorn solver.
    stopInnerThr : float, optional
        Stop threshold on error of the inner sinkhorn solver (> 0).
    verbose : bool, optional
        Print information along iterations.
    log : bool, optional
        Record log if True.
    to_numpy : bool, optional (default True)
        If True, convert the GPU result back to numpy format.

    Returns
    -------
    gamma : (ns, nt) ndarray
        Optimal transportation matrix for the given parameters.
    log : dict
        Log dictionary, returned only if ``log == True``.

    References
    ----------
    .. [5] N. Courty; R. Flamary; D. Tuia; A. Rakotomamonjy,
       "Optimal Transport for Domain Adaptation," IEEE TPAMI.
    .. [7] Rakotomamonjy, A., Flamary, R., & Courty, N. (2015).
       Generalized conditional gradient: analysis of convergence and
       applications. arXiv:1510.06567.

    See Also
    --------
    ot.lp.emd : Unregularized OT
    ot.bregman.sinkhorn : Entropic regularized OT
    ot.optim.cg : General regularized OT
    """
    a, labels_a, b, M = utils.to_gpu(a, labels_a, b, M)
    p = 0.5
    epsilon = 1e-3
    # Class labels are needed on the host to compute per-class index groups.
    labels_host = cp.asnumpy(labels_a)
    classes = npp.unique(labels_host)
    class_indices = []
    for c in classes:
        idxc, = utils.to_gpu(npp.where(labels_host == c))
        class_indices.append(idxc)
    W = np.zeros(M.shape)
    for _ in range(numItermax):
        # Majoration step: linearize the group-lasso term into the cost,
        # solve the resulting entropic OT problem, then refresh W.
        Mreg = M + eta * W
        transp = sinkhorn(a, b, Mreg, reg, numItermax=numInnerItermax, stopThr=stopInnerThr, to_numpy=False)
        # the transport has been computed. Check if classes are really separated
        W = np.ones(M.shape)
        for (i, c) in enumerate(classes):
            majs = np.sum(transp[class_indices[i]], axis=0)
            majs = p * ((majs + epsilon) ** (p - 1))
            W[class_indices[i]] = majs
    if to_numpy:
        return utils.to_np(transp)
    return transp
def clip(self, lower=0, upper=127):
    """Clip the pianorolls of all tracks to ``[lower, upper]``.

    Parameters
    ----------
    lower : int or float
        The lower bound to clip the pianorolls. Defaults to 0.
    upper : int or float
        The upper bound to clip the pianorolls. Defaults to 127.
    """
    for component_track in self.tracks:
        component_track.clip(lower, upper)
def _get_service_info(service):
    '''return details about given connman service'''
    service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, service))
    data = {
        'label': service,
        'wireless': service_info.get_property('Type') == 'wifi',
        'connectionid': six.text_type(service_info.get_property('Ethernet')['Interface']),
        'hwaddr': six.text_type(service_info.get_property('Ethernet')['Address']),
    }
    state = service_info.get_property('State')
    if state in ('ready', 'online'):
        data['up'] = True
        data['ipv4'] = {'gateway': '0.0.0.0'}
        # Manually-configured addresses live under 'IPv4.Configuration'.
        ipv4_prop = 'IPv4'
        if service_info.get_property('IPv4')['Method'] == 'manual':
            ipv4_prop += '.Configuration'
        ipv4_info = service_info.get_property(ipv4_prop)
        for info in ['Method', 'Address', 'Netmask', 'Gateway']:
            value = ipv4_info.get(info)
            if value is None:
                log.warning('Unable to get IPv4 %s for service %s\n', info, service)
                continue
            if info == 'Method':
                # Translate connman's method names to our request modes.
                info = 'requestmode'
                if value == 'dhcp':
                    value = 'dhcp_linklocal'
                elif value in ('manual', 'fixed'):
                    value = 'static'
            data['ipv4'][info.lower()] = six.text_type(value)
        ipv6_info = service_info.get_property('IPv6')
        for info in ['Address', 'Prefix', 'Gateway']:
            value = ipv6_info.get(info)
            if value is None:
                log.warning('Unable to get IPv6 %s for service %s\n', info, service)
                continue
            data.setdefault('ipv6', {})[info.lower()] = [six.text_type(value)]
        data['ipv4']['dns'] = [six.text_type(ns) for ns in service_info.get_property('Nameservers')]
    else:
        data['up'] = False
        data['ipv4'] = {'requestmode': 'disabled'}
    data['ipv4']['supportedrequestmodes'] = ['static', 'dhcp_linklocal', 'disabled']
    return data
def on_change_checkout(self):
    """Recompute the line quantity when checkin_date/checkout_date change.

    The duration in days between check-in and check-out becomes the
    product quantity (a configured number of additional hours can round
    the stay up by one day), and a domain is returned restricting
    ``product_id`` to rooms that are free over the chosen period.

    @param self: object pointer
    """
    configured_addition_hours = 0
    fwhouse_id = self.folio_id.warehouse_id
    # NOTE(review): when the warehouse is empty this falls through to
    # fwhouse_id.company_id (an empty recordset in Odoo) — confirm intended.
    fwc_id = fwhouse_id or fwhouse_id.company_id
    if fwc_id:
        configured_addition_hours = fwhouse_id.company_id.additional_hours
    myduration = 0
    # Default missing dates to "now" so the duration math has input.
    if not self.checkin_date:
        self.checkin_date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
    if not self.checkout_date:
        self.checkout_date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
    chckin = self.checkin_date
    chckout = self.checkout_date
    if chckin and chckout:
        server_dt = DEFAULT_SERVER_DATETIME_FORMAT
        chkin_dt = datetime.datetime.strptime(chckin, server_dt)
        chkout_dt = datetime.datetime.strptime(chckout, server_dt)
        dur = chkout_dt - chkin_dt
        sec_dur = dur.seconds
        # Whole days when there are no leftover seconds; otherwise round up.
        if (not dur.days and not sec_dur) or (dur.days and not sec_dur):
            myduration = dur.days
        else:
            myduration = dur.days + 1
        # To calculate additional hours in hotel room as per minutes
        if configured_addition_hours > 0:
            additional_hours = abs((dur.seconds / 60) / 60)
            if additional_hours >= configured_addition_hours:
                myduration += 1
    self.product_uom_qty = myduration
    # Collect rooms with no overlapping, non-cancelled booking line.
    hotel_room_obj = self.env['hotel.room']
    hotel_room_ids = hotel_room_obj.search([])
    avail_prod_ids = []
    for room in hotel_room_ids:
        assigned = False
        for rm_line in room.room_line_ids:
            if rm_line.status != 'cancel':
                # Overlap if either booking's bounds fall inside the other's period.
                if (self.checkin_date <= rm_line.check_in <= self.checkout_date) or (self.checkin_date <= rm_line.check_out <= self.checkout_date):
                    assigned = True
                elif (rm_line.check_in <= self.checkin_date <= rm_line.check_out) or (rm_line.check_in <= self.checkout_date <= rm_line.check_out):
                    assigned = True
        if not assigned:
            avail_prod_ids.append(room.product_id.id)
    domain = {'product_id': [('id', 'in', avail_prod_ids)]}
    return {'domain': domain}
def _gssa(self, initial_conditions, t_max):
    """Run one Gillespie stochastic simulation (GSSA) trajectory.

    This function is inspired from Yoav Ram's code available at:
    http://nbviewer.ipython.org/github/yoavram/ipython-notebooks/blob/master/GSSA.ipynb

    :param initial_conditions: the initial species counts of the system
    :param t_max: the time when the simulation should stop
    :return: tuple ``(time_points, species)`` where ``species`` has one
        row per species and one column per recorded time point
    """
    # Record the initial state at t0 = 0.
    species_over_time = [np.array(initial_conditions).astype("int16")]
    t = 0
    time_points = [t]
    # Iterate until the time horizon is reached or all species are extinct.
    while t < t_max and species_over_time[-1].sum() > 0:
        last = species_over_time[-1]
        # _draw samples the next event index and the waiting time dt.
        e, dt = self._draw(last)
        t += dt
        # Apply the stoichiometric change vector of event `e`.
        species_over_time.append(last + self.__change[e, :])
        time_points.append(t)
    return time_points, np.array(species_over_time).T
def write_pdb(self, path):
    """Output a PDB file with the current contents of the system.

    :param path: destination file path for the PDB output.
    :raises ValueError: when the data needed to write the file is missing.
    """
    # NOTE(review): the guard tests `self.master` while the writer below
    # uses `self.topology`; presumably a valid `master` implies a topology
    # is available -- confirm. Also note the `and`: the error fires only
    # when BOTH attributes are None.
    if self.master is None and self.positions is None:
        raise ValueError('Topology and positions are needed to write output files.')
    with open(path, 'w') as f:
        PDBFile.writeFile(self.topology, self.positions, f)
def __create_image(self, inpt, hashfun):
    """Create the avatar for *inpt* with the chosen hash function.

    Unknown or unsupported hash functions fall back to
    ``DEFAULT_HASHFUN`` after printing a notice.
    """
    algo = hashfun
    if algo not in generator.HASHES.keys():
        print("Unknown or unsupported hash function. Using default: %s" % self.DEFAULT_HASHFUN)
        algo = self.DEFAULT_HASHFUN
    return generator.generate(inpt, algo)
def _get_os(self):
    '''Summarise the operating system from the minion grains.'''
    grain_map = (('name', 'os'), ('family', 'os_family'),
                 ('arch', 'osarch'), ('release', 'osrelease'))
    return {label: self._grain(grain) for label, grain in grain_map}
def cake(return_X_y=True):
    """Load the cake dataset.

    X contains the category of recipe used (transformed to an integer),
    the category of replicate, and the temperature.
    y contains the angle at which the cake broke.

    Source:
    https://vincentarelbundock.github.io/Rdatasets/doc/lme4/cake.html

    Parameters
    ----------
    return_X_y : bool
        If True, returns a model-ready tuple of data (X, y);
        otherwise, returns a Pandas DataFrame.

    Returns
    -------
    model-ready tuple of data (X, y) OR a Pandas DataFrame
    """
    # y is real-valued; recommend LinearGAM.
    cake = pd.read_csv(PATH + '/cake.csv', index_col=0)
    if return_X_y:
        X = cake[['recipe', 'replicate', 'temperature']].values
        # Encode the string recipe labels as 0-based integer codes.
        X[:, 0] = np.unique(cake.values[:, 1], return_inverse=True)[1]
        # Shift replicate numbers to be 0-based.
        X[:, 1] -= 1
        y = cake['angle'].values
        return _clean_X_y(X, y)
    return cake
def transact(self, transaction=None):
    """Customize calling smart contract transaction functions to use
    `personal_sendTransaction` instead of `eth_sendTransaction` and to
    estimate the gas limit. This function is largely copied from web3
    ContractFunction with an important addition.

    Note: will fall back to `eth_sendTransaction` if `passphrase` is not
    provided in the `transaction` dict.

    :param transaction: dict which has the required transaction arguments
        per `personal_sendTransaction` requirements.
    :return: hex str transaction hash
    :raises ValueError: when `data` is preset or no `to` address can be
        determined.
    """
    if transaction is None:
        transact_transaction = {}
    else:
        transact_transaction = dict(**transaction)
    if 'data' in transact_transaction:
        raise ValueError("Cannot set data in transact transaction")
    cf = self._contract_function
    if cf.address is not None:
        transact_transaction.setdefault('to', cf.address)
    if cf.web3.eth.defaultAccount is not empty:
        transact_transaction.setdefault('from', cf.web3.eth.defaultAccount)
    if 'to' not in transact_transaction:
        if isinstance(self, type):
            raise ValueError("When using `Contract.transact` from a contract factory you " "must provide a `to` address with the transaction")
        else:
            raise ValueError("Please ensure that this contract instance has an address.")
    if 'gas' not in transact_transaction:
        # Estimate the gas limit when the caller did not supply one.
        # Bug fix: estimate from the assembled `transact_transaction`; the
        # previous code copied the raw `transaction` argument, which is
        # None when no dict was passed and crashed with AttributeError.
        tx = transact_transaction.copy()
        # `passphrase` belongs to personal_sendTransaction and is not a
        # valid field for eth_estimateGas.
        tx.pop('passphrase', None)
        gas = cf.estimateGas(tx)
        transact_transaction['gas'] = gas
    return transact_with_contract_function(cf.address, cf.web3, cf.function_identifier, transact_transaction, cf.contract_abi, cf.abi, *cf.args, **cf.kwargs)
def file2abspath(filename, this_file=__file__):
    """Return the absolute path of *filename*, resolved relative to the
    directory containing *this_file* (this module by default)."""
    base_dir = os.path.dirname(os.path.abspath(this_file))
    return os.path.abspath(os.path.join(base_dir, filename))
def _fast_dataset(variables: 'OrderedDict[Any, Variable]', coord_variables: Mapping[Any, Variable], ) -> 'Dataset':
    """Create a dataset as quickly as possible.

    Beware: the `variables` OrderedDict is modified INPLACE.

    :param variables: data variables; coord_variables are merged into it.
    :param coord_variables: variables to register as coordinates.
    :return: the assembled Dataset.
    """
    # Local import to avoid a circular dependency with .dataset.
    from .dataset import Dataset
    # Merge coordinates into the variables dict (mutates the caller's dict).
    variables.update(coord_variables)
    coord_names = set(coord_variables)
    # Bypass the normal (slower, validating) Dataset constructor.
    return Dataset._from_vars_and_coord_names(variables, coord_names)
def from_signed_raw(cls: Type[PeerType], raw: str) -> PeerType:
    """Return a Peer instance from a signed raw format string.

    :param raw: Signed raw format string
    :return: the parsed Peer instance
    :raises MalformedDocumentError: when the signature line is missing
    """
    lines = raw.splitlines(True)
    n = 0
    # Fixed header lines, parsed strictly in order:
    # Version, Type, Currency, Pubkey, Block, Endpoints.
    version = int(Peer.parse_field("Version", lines[n]))
    n += 1
    Peer.parse_field("Type", lines[n])
    n += 1
    currency = Peer.parse_field("Currency", lines[n])
    n += 1
    pubkey = Peer.parse_field("Pubkey", lines[n])
    n += 1
    block_uid = BlockUID.from_str(Peer.parse_field("Block", lines[n]))
    n += 1
    Peer.parse_field("Endpoints", lines[n])
    n += 1
    # Every line up to the signature line is an endpoint declaration.
    endpoints = []
    while not Peer.re_signature.match(lines[n]):
        endpoints.append(endpoint(lines[n]))
        n += 1
    data = Peer.re_signature.match(lines[n])
    if data is None:
        raise MalformedDocumentError("Peer")
    signature = data.group(1)
    return cls(version, currency, pubkey, block_uid, endpoints, signature)
def _var_bounds(self):
    """Assemble the initial point and the lower/upper bounds of all
    optimisation variables, concatenated in declaration order.

    :return: tuple ``(x0, xmin, xmax)`` of concatenated arrays.
    """
    x0, xmin, xmax = array([]), array([]), array([])
    for variable in self.om.vars:
        x0 = r_[x0, variable.v0]
        xmin = r_[xmin, variable.vl]
        xmax = r_[xmax, variable.vu]
    return x0, xmin, xmax
def increment_extension_daily_stat(self, publisher_name, extension_name, version, stat_type):
    """IncrementExtensionDailyStat.

    [Preview API] Increments a daily statistic associated with the extension.

    :param str publisher_name: Name of the publisher
    :param str extension_name: Name of the extension
    :param str version: Version of the extension
    :param str stat_type: Type of stat to increment
    """
    # Build the route values, skipping any argument that was not supplied.
    route_values = {}
    route_specs = (('publisherName', 'publisher_name', publisher_name),
                   ('extensionName', 'extension_name', extension_name),
                   ('version', 'version', version))
    for route_key, param_name, value in route_specs:
        if value is not None:
            route_values[route_key] = self._serialize.url(param_name, value, 'str')
    query_parameters = {}
    if stat_type is not None:
        query_parameters['statType'] = self._serialize.query('stat_type', stat_type, 'str')
    self._send(http_method='POST',
               location_id='4fa7adb6-ca65-4075-a232-5f28323288ea',
               version='5.0-preview.1',
               route_values=route_values,
               query_parameters=query_parameters)
def filter_curriculum(curriculum, week, weekday=None):
    """Filter the curriculum down to the courses of a given week (and,
    optionally, a given weekday).

    :param curriculum: the full curriculum data
    :param week: positive integer week number to keep
    :param weekday: integer 1-7 mapping Monday-Sunday; when omitted, the
        result has the same shape as the input but only contains courses
        of the given week, otherwise only that single day is returned
    :return: the filtered curriculum (or single day when weekday is given)
    """
    if weekday:
        days = [deepcopy(curriculum[weekday - 1])]
    else:
        days = deepcopy(curriculum)
    for day in days:
        for slot_idx, slot in enumerate(day):
            if slot is None:
                continue
            # Keep only the courses scheduled in the requested week;
            # an empty slot collapses back to None.
            kept = [course for course in slot if week in course['上课周数']] or None
            # Normally no two courses share one slot; warn on conflicts.
            if kept is not None and len(kept) > 1:
                logger.warning('第 %d 周周 %d 第 %d 节课有冲突: %s', week, weekday or days.index(day) + 1, slot_idx + 1, kept)
            day[slot_idx] = kept
    return days[0] if weekday else days
def get_local_client(c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), mopts=None, skip_perm_errors=False, io_loop=None, auto_reconnect=False):
    '''.. versionadded:: 2014.7.0

    Read in the config and return the correct LocalClient object based on
    the configured transport.

    :param str c_path: path to the master config; only read when `mopts`
        is not supplied.
    :param dict mopts: pre-loaded master options, used verbatim when given.
    :param IOLoop io_loop: io_loop used for events.
        Pass in an io_loop if you want asynchronous
        operation for obtaining events. Eg use of
        set_event_handler() API. Otherwise, operation
        will be synchronous.
    '''
    if mopts:
        opts = mopts
    else:
        # Late import to prevent circular import
        import salt.config
        opts = salt.config.client_config(c_path)
    # TODO: AIO core is separate from transport
    return LocalClient(mopts=opts, skip_perm_errors=skip_perm_errors, io_loop=io_loop, auto_reconnect=auto_reconnect)
def call(napalm_device, method, *args, **kwargs):
    '''Calls arbitrary methods from the network driver instance.
    Please check the readthedocs_ page for the updated list of getters.

    .. _readthedocs: http://napalm.readthedocs.org/en/latest/support/index.html#getters-support-matrix

    method
        Specifies the name of the method to be called.
    *args
        Arguments.
    **kwargs
        More arguments.
    :return: A dictionary with three keys:
        * result (True/False): if the operation succeeded
        * out (object): returns the object as-is from the call
        * comment (string): provides more details in case the call failed
        * traceback (string): complete traceback in case of exception. Please submit an issue including this traceback on the `correct driver repo`_ and make sure to read the FAQ_

    .. _`correct driver repo`: https://github.com/napalm-automation/napalm/issues/new
    .. FAQ_: https://github.com/napalm-automation/napalm#faq

    Example:

    .. code-block:: python

        salt.utils.napalm.call(
            napalm_object,
            'cli',
            'show version',
            'show chassis fan'
    '''
    result = False
    out = None
    opts = napalm_device.get('__opts__', {})
    # Pop the private retry flag so it is never forwarded to NAPALM.
    retry = kwargs.pop('__retry', True)
    # retry executing the task?
    force_reconnect = kwargs.get('force_reconnect', False)
    if force_reconnect:
        # Rebuild the device connection from the (kwarg-updated) proxy opts.
        log.debug('Forced reconnection initiated')
        log.debug('The current opts (under the proxy key):')
        log.debug(opts['proxy'])
        opts['proxy'].update(**kwargs)
        log.debug('Updated to:')
        log.debug(opts['proxy'])
        napalm_device = get_device(opts)
    try:
        if not napalm_device.get('UP', False):
            raise Exception('not connected')
        # if connected will try to execute desired command
        kwargs_copy = {}
        kwargs_copy.update(kwargs)
        for karg, warg in six.iteritems(kwargs_copy):
            # lets clear None arguments
            # to not be sent to NAPALM methods
            if warg is None:
                kwargs.pop(karg)
        out = getattr(napalm_device.get('DRIVER'), method)(*args, **kwargs)
        # calls the method with the specified parameters
        result = True
    except Exception as error:
        # either not connected
        # either unable to execute the command
        hostname = napalm_device.get('HOSTNAME', '[unspecified hostname]')
        err_tb = traceback.format_exc()
        # let's get the full traceback and display for debugging reasons.
        if isinstance(error, NotImplementedError):
            comment = '{method} is not implemented for the NAPALM {driver} driver!'.format(method=method, driver=napalm_device.get('DRIVER_NAME'))
        elif retry and HAS_CONN_CLOSED_EXC_CLASS and isinstance(error, ConnectionClosedException):
            # Received disconection whilst executing the operation.
            # Instructed to retry (default behaviour)
            # thus trying to re-establish the connection
            # and re-execute the command
            # if any of the operations (close, open, call) will rise again ConnectionClosedException
            # it will fail loudly.
            kwargs['__retry'] = False
            # do not attempt re-executing
            comment = 'Disconnected from {device}. Trying to reconnect.'.format(device=hostname)
            log.error(err_tb)
            log.error(comment)
            log.debug('Clearing the connection with %s', hostname)
            call(napalm_device, 'close', __retry=False)
            # safely close the connection
            # Make sure we don't leave any TCP connection open behind
            # if we fail to close properly, we might not be able to access the
            log.debug('Re-opening the connection with %s', hostname)
            call(napalm_device, 'open', __retry=False)
            log.debug('Connection re-opened with %s', hostname)
            log.debug('Re-executing %s', method)
            return call(napalm_device, method, *args, **kwargs)
            # If still not able to reconnect and execute the task,
            # the proxy keepalive feature (if enabled) will attempt
            # to reconnect.
            # If the device is using a SSH-based connection, the failure
            # will also notify the paramiko transport and the `is_alive` flag
            # is going to be set correctly.
            # More background: the network device may decide to disconnect,
            # although the SSH session itself is alive and usable, the reason
            # being the lack of activity on the CLI.
            # Paramiko's keepalive doesn't help in this case, as the ServerAliveInterval
            # are targeting the transport layer, whilst the device takes the decision
            # when there isn't any activity on the CLI, thus at the application layer.
            # Moreover, the disconnect is silent and paramiko's is_alive flag will
            # continue to return True, although the connection is already unusable.
            # For more info, see https://github.com/paramiko/paramiko/issues/813.
            # But after a command fails, the `is_alive` flag becomes aware of these
            # changes and will return False from there on. And this is how the
            # Salt proxy keepalive helps: immediately after the first failure, it
            # will know the state of the connection and will try reconnecting.
        else:
            comment = 'Cannot execute "{method}" on {device}{port} as {user}. Reason: {error}!'.format(device=napalm_device.get('HOSTNAME', '[unspecified hostname]'), port=(':{port}'.format(port=napalm_device.get('OPTIONAL_ARGS', {}).get('port')) if napalm_device.get('OPTIONAL_ARGS', {}).get('port') else ''), user=napalm_device.get('USERNAME', ''), method=method, error=error)
            log.error(comment)
            log.error(err_tb)
        return {'out': {}, 'result': False, 'comment': comment, 'traceback': err_tb}
    finally:
        if opts and not_always_alive(opts) and napalm_device.get('CLOSE', True):
            # either running in a not-always-alive proxy
            # either running in a regular minion
            # close the connection when the call is over
            # unless the CLOSE is explicitly set as False
            napalm_device['DRIVER'].close()
    return {'out': out, 'result': result, 'comment': ''}
def discover(timeout=5):
    """Convenience method to discover UPnP devices on the network.

    Returns a list of `upnp.Device` instances. Any invalid servers are
    silently ignored (errors are only logged).
    """
    found = {}
    for entry in scan(timeout):
        location = entry.location
        # Deduplicate by location: keep the first device seen.
        if location in found:
            continue
        try:
            found[location] = Device(location)
        except Exception as exc:
            log = _getLogger("ssdp")
            log.error('Error \'%s\' for %s', exc, location)
    return list(found.values())
def loop_until_closed(self, suppress_warning=False):
    '''Execute a blocking loop that runs and executes event callbacks
    until the connection is closed (e.g. by hitting Ctrl-C).

    While this method can be used to run Bokeh application code "outside"
    the Bokeh server, this practice is HIGHLY DISCOURAGED for any real
    use case. This function is intended to facilitate testing ONLY.

    :param suppress_warning: accepted for backward compatibility only;
        it is ignored.
    '''
    suppress_warning
    # shut up flake
    from bokeh.util.deprecation import deprecated
    # Bug fix: the URL previously read "https//..." (missing colon),
    # producing a broken link in the user-facing deprecation message.
    deprecated("ClientSession.loop_until_closed is deprecated, and will be removed in an eventual 2.0 release. " "Run Bokeh applications directly on a Bokeh server instead. See:\n\n" " https://docs.bokeh.org/en/latest/docs/user_guide/server.html\n")
    self._connection.loop_until_closed()
def search(self, song_title, limit=1):
    """Search for a song by title (NetEase Cloud Music API).

    :param song_title: the song title to search for
    :param limit: maximum number of results to request
    :return: the first matching song dict, or None when the request
        fails or no song matches
    """
    url = "http://music.163.com/api/search/pc"
    headers = {'Cookie': 'appver=1.5.2', 'Referer': 'http://music.163.com'}
    payload = {'s': song_title, 'limit': limit, 'type': 1}
    r = requests.post(url, params=payload, headers=headers)
    data = json.loads(r.text)
    if data['code'] == 200:
        # Bug fix: guard against an empty result set -- previously this
        # raised KeyError/IndexError when code == 200 but nothing matched.
        songs = data.get('result', {}).get('songs') or []
        return songs[0] if songs else None
    return None
def create(self, type, friendly_name=values.unset, certificate=values.unset, private_key=values.unset, sandbox=values.unset, api_key=values.unset, secret=values.unset):
    """Create a new CredentialInstance.

    :param CredentialInstance.PushService type: The Credential type
    :param unicode friendly_name: A string to describe the resource
    :param unicode certificate: [APN only] The URL-encoded representation of the certificate
    :param unicode private_key: [APN only] URL-encoded representation of the private key
    :param bool sandbox: [APN only] Whether to send the credential to sandbox APNs
    :param unicode api_key: [GCM only] The `Server key` of your project from Firebase console under Settings / Cloud messaging
    :param unicode secret: [FCM only] The `Server key` of your project from Firebase console under Settings / Cloud messaging
    :returns: Newly created CredentialInstance
    :rtype: twilio.rest.notify.v1.credential.CredentialInstance
    """
    # Assemble the form fields; values.of drops unset entries.
    form_fields = {
        'Type': type,
        'FriendlyName': friendly_name,
        'Certificate': certificate,
        'PrivateKey': private_key,
        'Sandbox': sandbox,
        'ApiKey': api_key,
        'Secret': secret,
    }
    payload = self._version.create('POST', self._uri, data=values.of(form_fields), )
    return CredentialInstance(self._version, payload, )
def get_extents(self, element, ranges, range_type='combined'):
    """Make adjustments to plot extents by computing
    stacked bar heights, adjusting the bar baseline
    and forcing the x-axis to be categorical.
    """
    if self.batched:
        # Merge all overlaid Bars into a single element so extents are
        # computed over the whole batch.
        overlay = self.current_frame
        element = Bars(overlay.table(), kdims=element.kdims + overlay.kdims, vdims=element.vdims)
        for kd in overlay.kdims:
            ranges[kd.name]['combined'] = overlay.range(kd)
    extents = super(BarPlot, self).get_extents(element, ranges, range_type)
    xdim = element.kdims[0]
    ydim = element.vdims[0]
    # Compute stack heights
    if self.stacked or self.stack_index:
        ds = Dataset(element)
        # Sum positive and negative values separately per x-category, so
        # the stacked totals (not individual bars) define the y-range.
        pos_range = ds.select(**{ydim.name: (0, None)}).aggregate(xdim, function=np.sum).range(ydim)
        neg_range = ds.select(**{ydim.name: (None, 0)}).aggregate(xdim, function=np.sum).range(ydim)
        y0, y1 = max_range([pos_range, neg_range])
    else:
        y0, y1 = ranges[ydim.name]['combined']
    padding = 0 if self.overlaid else self.padding
    _, ypad, _ = get_axis_padding(padding)
    y0, y1 = range_pad(y0, y1, ypad, self.logy)
    # Set y-baseline: bars start at 0 unless negative values are present
    # or the y-axis is logarithmic.
    if y0 < 0:
        y1 = max([y1, 0])
    elif self.logy:
        y0 = (ydim.range[0] or (10 ** (np.log10(y1) - 2)) if y1 else 0.01)
    else:
        y0 = 0
    # Ensure x-axis is picked up as categorical
    x0 = xdim.pprint_value(extents[0])
    x1 = xdim.pprint_value(extents[2])
    return (x0, y0, x1, y1)
def log_response(handler):
    """Log the response headers (and, for text/json content, the body)
    of a handler.

    Actually, logging responses is not a server's responsibility --
    you should use HTTP tools like Chrome Developer Tools to analyse the
    response. Although this function and its setting (LOG_RESPONSE) are
    not recommended, nothing stops you during development.
    """
    content_type = handler._headers.get('Content-Type', None)
    headers_str = handler._generate_headers()
    block = 'Response Infomations:\n' + headers_str.strip()
    # Only dump bodies that are human-readable (text or json).
    if content_type and ('text' in content_type or 'json' in content_type):
        limit = 0
        if 'LOG_RESPONSE_LINE_LIMIT' in settings:
            limit = settings['LOG_RESPONSE_LINE_LIMIT']
        def cut(s):
            # Recursively chop `s` into pieces no longer than `limit`
            # characters (no chopping when limit is 0/unset).
            if limit and len(s) > limit:
                return [s[:limit]] + cut(s[limit:])
            else:
                return [s]
        body = ''.join(handler._write_buffer)
        lines = []
        for i in body.split('\n'):
            lines += ['| ' + j for j in cut(i)]
        block += '\nBody:\n' + '\n'.join(lines)
    app_log.info(block)
def _getError(self, device, message):
    """Get the error message or value stored in the Qik hardware.

    :Parameters:
        device : `int`
            The device is the integer number of the hardware device's ID
            and is only used with the Pololu Protocol.
        message : `bool`
            If set to `True` a text message will be returned, if set to
            `False` the integer stored in the Qik will be returned.

    :Returns:
        A list of text messages, integers, or an empty list. See the
        `message` parameter above.
    """
    cmd = self._COMMAND.get('get-error')
    self._writeData(cmd, device)
    result = []
    try:
        num = self._serial.read(size=1)
        num = ord(num)
    except serial.SerialException as e:
        self._log and self._log.error("Error: %s", e, exc_info=True)
        # Bare raise preserves the original traceback (previously `raise e`
        # re-raised from here, losing context).
        raise
    except TypeError:
        # read() returned no data (e.g. a timeout); treat as "no error
        # bits set".
        num = 0
    # Walk the error byte MSB -> LSB and collect the set bits (removed the
    # unused `bits` accumulator from the original).
    for i in range(7, -1, -1):
        bit = num & (1 << i)
        if bit:
            if message:
                result.append(self._ERRORS.get(bit))
            else:
                result.append(bit)
    return result
def generate(self, text):
    """Generate and save avatars, returning a list of file names:
    [filename_s, filename_m, filename_l].

    :param text: The text used to generate the image.
    """
    sizes = current_app.config['AVATARS_SIZE_TUPLE']
    path = current_app.config['AVATARS_SAVE_PATH']
    suffix = {sizes[0]: 's', sizes[1]: 'm', sizes[2]: 'l'}
    for size in sizes:
        side = int(size)
        # Render one square avatar per configured size, padded by 10%.
        image_byte_array = self.get_image(string=str(text), width=side, height=side, pad=int(size * 0.1))
        filename = '%s_%s.png' % (text, suffix[size])
        self.save(image_byte_array, save_location=os.path.join(path, filename))
    return [text + '_s.png', text + '_m.png', text + '_l.png']
def is_valid(hal_id):
    """Check that a given HAL id is a valid one.

    :param hal_id: The HAL id to be checked.
    :returns: Boolean indicating whether the HAL id is valid or not.

    >>> is_valid("hal-01258754, version 1")
    True

    >>> is_valid("hal-01258754")
    True

    >>> is_valid("hal-01258754v2")
    True

    >>> is_valid("foobar")
    False
    """
    match = REGEX.match(hal_id)
    if match is None:
        return False
    # The pattern must cover the whole string, not merely a prefix.
    return match.group(0) == hal_id
def _GetChunkForReading(self, chunk):
    """Return the requested chunk from the datastore, reading ahead."""
    try:
        return self.chunk_cache.Get(chunk)
    except KeyError:
        pass
    # Not cached yet. The most common read access pattern is contiguous
    # reading, so since we have to hit the data store anyway, prefetch the
    # following chunks to reduce round trips.
    missing_chunks = [n for n in range(chunk, chunk + 10)
                      if n not in self.chunk_cache]
    self._ReadChunks(missing_chunks)
    # The chunk should be cached now -- otherwise we just give up.
    try:
        return self.chunk_cache.Get(chunk)
    except KeyError:
        raise aff4.ChunkNotFoundError("Cannot open chunk %s" % chunk)
def isBlockComment(self, line, column):
    """Check if the text at the given position is a block comment.

    If the language is not known, or the text is not parsed yet,
    ``False`` is returned.
    """
    if self._highlighter is None:
        return False
    block = self.document().findBlockByNumber(line)
    return self._highlighter.isBlockComment(block, column)
def to_pickle(self, filepath):
    """Pickle this model object to disk.

    Parameters
    ----------
    filepath : str.
        Should end in .pkl. If it does not, ".pkl" will be appended to
        the passed string.

    Returns
    -------
    None. Saves the model object to the location specified by `filepath`.
    """
    if not isinstance(filepath, str):
        raise ValueError("filepath must be a string.")
    # Normalise the destination so it always carries the .pkl extension.
    destination = filepath if filepath.endswith(".pkl") else filepath + ".pkl"
    with open(destination, "wb") as f:
        pickle.dump(self, f)
    print("Model saved to {}".format(destination))
    return None
def hourly_dew_point(self):
    """A data collection containing hourly dew points over the day (in C)."""
    # Derive dew points from the humidity condition evaluated against the
    # day's dry-bulb temperature condition.
    dpt_data = self._humidity_condition.hourly_dew_point_values(self._dry_bulb_condition)
    return self._get_daily_data_collections(temperature.DewPointTemperature(), 'C', dpt_data)
def _encode_query ( query ) :
"""Quote all values of a query string .""" | if query == '' :
return query
query_args = [ ]
for query_kv in query . split ( '&' ) :
k , v = query_kv . split ( '=' )
query_args . append ( k + "=" + quote ( v . encode ( 'utf-8' ) ) )
return '&' . join ( query_args ) |
def update(self):
    """Update this `~photutils.isophote.EllipseSample` instance with
    the intensity integrated at the (x0, y0) center position using
    bilinear integration. The local gradient is set to `None`.
    """
    s = self.extract()
    # Presumably extract() returns (angles, radii, intensities); the mean
    # is the single intensity sample at the center -- confirm against the
    # extract() implementation.
    self.mean = s[2][0]
    # A single point has no meaningful local gradient.
    self.gradient = None
    self.gradient_error = None
    self.gradient_relative_error = None
def restriction(lam, mu, orbitals, U, beta):
    """Equation that determines the restriction on the Lagrange multiplier.

    Returns the difference between ``2 * orbitals`` times the Fermi
    distribution evaluated at ``-(mu + lam)`` and the expected filling at
    ``-lam``; a root of this function in `lam` satisfies the constraint.
    """
    return 2 * orbitals * fermi_dist(-(mu + lam), beta) - expected_filling(-1 * lam, orbitals, U, beta)
def sha256_digest(instr):
    '''Generate a SHA-256 hex digest of a given string.

    The input is converted to bytes before hashing and the hex digest is
    returned as unicode text.
    '''
    return salt.utils.stringutils.to_unicode(hashlib.sha256(salt.utils.stringutils.to_bytes(instr)).hexdigest())
def mxmt(m1, m2):
    """Multiply a 3x3 matrix and the transpose of another 3x3 matrix.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/mxmt_c.html

    :param m1: 3x3 double precision matrix.
    :type m1: 3x3-Element Array of floats
    :param m2: 3x3 double precision matrix.
    :type m2: 3x3-Element Array of floats
    :return: The product m1 times m2 transpose.
    :rtype: 3x3-Element Array of floats
    """
    # Convert the inputs to ctypes matrices for the CSPICE call.
    m1 = stypes.toDoubleMatrix(m1)
    m2 = stypes.toDoubleMatrix(m2)
    # Output buffer filled in-place by mxmt_c.
    mout = stypes.emptyDoubleMatrix()
    libspice.mxmt_c(m1, m2, mout)
    return stypes.cMatrixToNumpy(mout)
def find_file(search_dir, file_pattern):
    """Search for a file in a directory tree and return the first match.

    If the file is not found, an empty string is returned.

    Args:
        search_dir: The root directory to search in
        file_pattern: A unix-style wildcard pattern representing
            the file to find

    Returns:
        The path to the file if it was found, otherwise an empty string
    """
    for root, _dirnames, fnames in os.walk(search_dir):
        matches = fnmatch.filter(fnames, file_pattern)
        if matches:
            return os.path.join(root, matches[0])
    return ""
def attachment_show(self, id, **kwargs):
    """Show a single attachment.

    https://developer.zendesk.com/rest_api/docs/core/attachments#show-attachment
    """
    api_path = "/api/v2/attachments/{id}.json".format(id=id)
    return self.call(api_path, **kwargs)
def run(self):
    """Starts the blotter.

    Connects to the TWS/GW, processes and logs market data,
    and broadcast it over TCP via ZeroMQ (which algo subscribe to)
    """
    self._check_unique_blotter()

    # connect to mysql
    self.mysql_connect()

    # NOTE(review): zmq.Context's first positional argument is io_threads;
    # passing the socket-type constant zmq.REP here sets the I/O thread
    # count, not the socket type -- confirm this is intended.
    self.context = zmq.Context(zmq.REP)
    self.socket = self.context.socket(zmq.PUB)
    self.socket.bind("tcp://*:" + str(self.args['zmqport']))

    db_modified = 0        # mtime of the symbols file at the last successful read
    contracts = []         # contracts parsed on the current pass
    prev_contracts = []    # contracts subscribed on the previous pass
    first_run = True

    self.log_blotter.info("Connecting to Interactive Brokers...")
    self.ibConn = ezIBpy()
    self.ibConn.ibCallback = self.ibCallback

    # retry until a TWS/GW connection is established;
    # print a '*' for every failed attempt
    while not self.ibConn.connected:
        self.ibConn.connect(clientId=int(self.args['ibclient']),
                            port=int(self.args['ibport']),
                            host=str(self.args['ibserver']))
        time.sleep(1)
        if not self.ibConn.connected:
            print('*', end="", flush=True)
    self.log_blotter.info("Connection established...")

    try:
        while True:
            # bootstrap an empty symbols csv on the first pass it is missing
            if not os.path.exists(self.args['symbols']):
                pd.DataFrame(
                    columns=['symbol', 'sec_type', 'exchange',
                             'currency', 'expiry', 'strike', 'opt_type']
                ).to_csv(self.args['symbols'], header=True, index=False)
                tools.chmod(self.args['symbols'])
            else:
                time.sleep(0.1)

                # read db properties
                db_data = os.stat(self.args['symbols'])
                db_size = db_data.st_size
                db_last_modified = db_data.st_mtime

                # empty file: drop every live subscription and keep polling
                if db_size == 0:
                    if prev_contracts:
                        self.log_blotter.info('Cancel market data...')
                        self.ibConn.cancelMarketData()
                        time.sleep(0.1)
                        prev_contracts = []
                    continue

                # modified? skip the pass if the file is unchanged
                if not first_run and db_last_modified == db_modified:
                    continue

                # continue...
                db_modified = db_last_modified

                # read contracts db
                df = pd.read_csv(self.args['symbols'], header=0)
                if df.empty:
                    continue

                # remove expired contracts; expiry below 1000000 is
                # presumably YYYYMM, above is YYYYMMDD -- TODO confirm
                df = df[(
                    (df['expiry'] < 1000000) & (
                        df['expiry'] >= int(datetime.now().strftime('%Y%m')))
                ) | (
                    (df['expiry'] >= 1000000) & (
                        df['expiry'] >= int(datetime.now().strftime('%Y%m%d')))
                ) | np_isnan(df['expiry'])]

                # fix expiry formatting (no floats)
                df['expiry'] = df['expiry'].fillna(0).astype(int).astype(str)
                df.loc[df['expiry'] == "0", 'expiry'] = ""

                # combos (BAG) cannot be subscribed to directly
                df = df[df['sec_type'] != 'BAG']

                # persist the cleaned-up table back to disk
                df.fillna("", inplace=True)
                df.to_csv(self.args['symbols'], header=True, index=False)
                tools.chmod(self.args['symbols'])

                # ignore commented-out symbols (containing '#')
                df = df[~df['symbol'].str.contains("#")]
                contracts = [tuple(x) for x in df.values]

                if first_run:
                    first_run = False
                else:
                    if contracts != prev_contracts:
                        # cancel market data for removed contracts
                        for contract in prev_contracts:
                            if contract not in contracts:
                                self.ibConn.cancelMarketData(
                                    self.ibConn.createContract(contract))
                                if self.args['orderbook']:
                                    self.ibConn.cancelMarketDepth(
                                        self.ibConn.createContract(contract))
                                time.sleep(0.1)
                                contract_string = self.ibConn.contractString(
                                    contract).split('_')[0]
                                self.log_blotter.info(
                                    'Contract Removed [%s]', contract_string)

                # request market data for newly added contracts
                for contract in contracts:
                    if contract not in prev_contracts:
                        self.ibConn.requestMarketData(
                            self.ibConn.createContract(contract))
                        if self.args['orderbook']:
                            self.ibConn.requestMarketDepth(
                                self.ibConn.createContract(contract))
                        time.sleep(0.1)
                        contract_string = self.ibConn.contractString(
                            contract).split('_')[0]
                        self.log_blotter.info(
                            'Contract Added [%s]', contract_string)

                # update latest contracts
                prev_contracts = contracts

            time.sleep(2)

    except (KeyboardInterrupt, SystemExit):
        self.quitting = True
        # don't display connection errors on ctrl+c
        print("\n\n>>> Interrupted with Ctrl-c...\n(waiting for running tasks to be completed)\n")
        # asynctools.multitasking.killall() # stop now
        asynctools.multitasking.wait_for_tasks()
        # wait for threads to complete
        sys.exit(1)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.