| signature | implementation |
|---|---|
def get_visible_toolbars(self):
    """Collect the currently visible toolbars into ``self.visible_toolbars``."""
    self.visible_toolbars = [
        toolbar
        for toolbar in self.toolbarslist
        if toolbar.toggleViewAction().isChecked()
    ]
def make_eventrule(date_rule, time_rule, cal, half_days=True):
    """Construct an event rule from the factory API.

    Combines ``date_rule`` and ``time_rule`` into a single once-per-day
    rule associated with the calendar ``cal``.
    """
    _check_if_not_called(date_rule)
    _check_if_not_called(time_rule)
    if half_days:
        inner_rule = date_rule & time_rule
    else:
        # Caller opted out of half trading days; exclude them explicitly.
        inner_rule = date_rule & time_rule & NotHalfDay()
    opd = OncePerDay(rule=inner_rule)
    # This is where a scheduled function's rule is associated with a calendar.
    opd.cal = cal
    return opd
def pressAndHold(*args):
    """Press and hold the given keys. Does NOT release them.

    Accepts as many arguments as you want, e.g.
    ``pressAndHold('left_arrow', 'a', 'b')``.
    """
    for i in args:
        # Flags 0,0 send a key-down event only (no key-up).
        win32api.keybd_event(VK_CODE[i], 0, 0, 0)
        time.sleep(.05)
def read_ligolw(source, contenthandler=LIGOLWContentHandler, **kwargs):
    """Read one or more LIGO_LW format files.

    Parameters
    ----------
    source : `str`, `file`
        the open file or file path to read
    contenthandler : `~xml.sax.handler.ContentHandler`, optional
        content handler used to parse document
    verbose : `bool`, optional
        be verbose when reading files, default: `False`

    Returns
    -------
    xmldoc : :class:`~ligo.lw.ligolw.Document`
        the document object as parsed from the file(s)
    """
    from ligo.lw.ligolw import Document
    from ligo.lw import types
    from ligo.lw.lsctables import use_in
    from ligo.lw.utils import (load_url, ligolw_add)
    # mock ToPyType to link to numpy dtypes
    topytype = types.ToPyType.copy()
    for key in types.ToPyType:
        if key in types.ToNumPyType:
            types.ToPyType[key] = numpy.dtype(types.ToNumPyType[key]).type
    contenthandler = use_in(contenthandler)
    # read one or more files into a single Document
    source = file_list(source)
    try:
        if len(source) == 1:
            return load_url(source[0], contenthandler=contenthandler, **kwargs)
        return ligolw_add.ligolw_add(Document(), source, contenthandler=contenthandler, **kwargs)
    except LigolwElementError as exc:
        # failed to read with ligo.lw, try again with glue.ligolw
        # (ilwdchar_compat)
        if LIGO_LW_COMPAT_ERROR.search(str(exc)):
            try:
                return read_ligolw(source, contenthandler=contenthandler, ilwdchar_compat=True, **kwargs)
            except Exception:
                # if fails for any reason, use original error
                pass
        raise
    finally:
        # replace ToPyType (undo the numpy-dtype patch above)
        types.ToPyType = topytype
def _get_error_message ( self , response ) :
"""Parse and return the first error message""" | error_message = 'An error occurred processing your request.'
try :
content = response . json ( )
# { " errors " : [ { " code " : 34 , " message " : " Sorry ,
# that page does not exist " } ] }
error_message = content [ 'errors' ] [ 0 ] [ 'message' ]
except TypeError :
error_message = content [ 'errors' ]
except ValueError : # bad json data from Twitter for an error
pass
except ( KeyError , IndexError ) : # missing data so fallback to default message
pass
return error_message |
def vorticity(u, v, dx, dy):
    r"""Calculate the vertical vorticity of the horizontal wind.

    Parameters
    ----------
    u : (M, N) ndarray
        x component of the wind
    v : (M, N) ndarray
        y component of the wind
    dx : float or ndarray
        The grid spacing(s) in the x-direction. If an array, there should
        be one item less than the size of `u` along the applicable axis.
    dy : float or ndarray
        The grid spacing(s) in the y-direction. If an array, there should
        be one item less than the size of `u` along the applicable axis.

    Returns
    -------
    (M, N) ndarray
        vertical vorticity

    See Also
    --------
    divergence

    Notes
    -----
    If inputs have more than two dimensions, they are assumed to have
    either leading dimensions of (x, y) or trailing dimensions of (y, x),
    depending on the value of ``dim_order``.
    """
    # zeta = dv/dx - du/dy
    dudy = first_derivative(u, delta=dy, axis=-2)
    dvdx = first_derivative(v, delta=dx, axis=-1)
    return dvdx - dudy
def generate_hashes(filepath, blocksize=65536):
    """Compute md5 and sha1 digests of a file in a single streaming pass.

    Using two hashes lowers the probability of collision and false
    negative (file modified but the hash is the same). Supports big
    files by feeding the hashers block by block. ``blocksize`` can be
    any multiple of 128.

    :return: ``(md5_hexdigest, sha1_hexdigest)`` tuple
    """
    md5 = hashlib.md5()
    sha1 = hashlib.sha1()
    with open(filepath, 'rb') as stream:
        while True:
            block = stream.read(blocksize)
            if not block:
                break
            # Feed both hashers from the same read.
            md5.update(block)
            sha1.update(block)
    return (md5.hexdigest(), sha1.hexdigest())
def reduce_stock(self, product_id, sku_info, quantity):
    """Reduce product stock (WeChat merchant API).

    :param product_id: product ID
    :param sku_info: SKU info in the form ``"id1:vid1;id2:vid2"``; pass
        an empty string for products with a single uniform SKU
    :param quantity: amount of stock to subtract
    :return: the JSON payload returned by the API
    """
    payload = {
        "product_id": product_id,
        "sku_info": sku_info,
        "quantity": quantity,
    }
    return self._post('merchant/stock/reduce', data=payload)
def insert(self, seq):
    """Populate the DB from a sequence of strings, ERASING PREVIOUS STATE.

    :param seq: an iterable of strings
    :raises ValueError: if ``seq`` is itself a string, or if any
        element of ``seq`` is not a string
    """
    # erase previous elements and make defaultdict for easier insertion.
    self._els_idxed = defaultdict(lambda: defaultdict(set))
    # isinstance (rather than an exact type() check) handles str
    # subclasses consistently with normal Python semantics.
    if isinstance(seq, str):
        raise ValueError('Provided argument should be a sequence of strings'
                         ', but not a string itself.')
    for el in seq:
        if not isinstance(el, str):
            raise ValueError('Element %s is not a string' % (el,))
        for gram in make_unique_ngrams(el, self.idx_size):
            self._els_idxed[gram][len(el)].add(el)
    # convert defaultdict to dict so as to not increase size when checking
    # for presence of an element
    self._finalize_db()
def to_dict(self):
    """Convert the object into a json serializable dictionary.

    Note: It uses the private method _save_to_input_dict of the parent.

    :return dict: json serializable dictionary containing the needed
        information to instantiate the object
    """
    input_dict = super(Add, self)._save_to_input_dict()
    # Record the concrete class so the kernel can be reconstructed later.
    input_dict["class"] = str("GPy.kern.Add")
    return input_dict
def analyse(self, demand_item, demand_item_code):
    """Run the analysis of the model.

    Stores the results in a new item ``LcoptModel.result_set`` and
    returns ``True`` on completion.
    """
    my_analysis = Bw2Analysis(self)
    self.result_set = my_analysis.run_analyses(demand_item, demand_item_code, **self.analysis_settings)
    return True
def get(number, locale):
    """Return the plural position to use for the given locale and number.

    @type number: int
    @param number: The number
    @type locale: str
    @param locale: The locale
    @rtype: int
    @return: The plural position
    """
    if locale == 'pt_BR':
        # temporarily map Brazilian Portuguese onto its own rule set
        locale = 'xbr'
    if len(locale) > 3:
        locale = locale.split("_")[0]
    rule = PluralizationRules._rules.get(locale, lambda _: 0)
    position = rule(number)
    # Guard against misbehaving rules: positions are non-negative ints.
    if isinstance(position, int) and position >= 0:
        return position
    return 0
def in_resource(self, field, resource):
    """Return True if ``resource`` contains a usable value for ``field``
    (i.e. the value is neither ``None`` nor the empty string)."""
    value = resource.get(field, None)
    if value is None:
        return False
    return value != ''
def refresh(self, reload=False):
    """Invalidate cached properties and re-fetch the question list.

    :param reload: Make the request to return a new profile tree. This
        will result in the caching of the profile_tree attribute. The
        new profile_tree will be returned.
    """
    # BUG FIX: ('authcode') is just the string 'authcode' — parentheses
    # alone do not make a tuple. bust_caches would have iterated the
    # characters of the string instead of excluding the 'authcode' cache.
    util.cached_property.bust_caches(self, excludes=('authcode',))
    self.questions = self.question_fetchable()
    if reload:
        return self.profile_tree
def definitions_help():
    """Help message for Definitions.

    .. versionadded:: 4.0.0

    :returns: A message object containing helpful information.
    :rtype: messaging.message.Message
    """
    message = m.Message()
    message.add(m.Brand())
    message.add(heading())
    message.add(content())
    return message
def prepare_blacklist(src, dst, duration=3600, src_port1=None, src_port2=None,
                      src_proto='predefined_tcp', dst_port1=None, dst_port2=None,
                      dst_proto='predefined_tcp'):
    """Create a blacklist entry.

    A blacklist can be added directly from the engine node, or from the
    system context. If submitting from the system context, it becomes a
    global blacklist. This returns the properly formatted json to submit.

    :param src: source address, with cidr, i.e. 10.10.10.10/32 or 'any'
    :param dst: destination address with cidr, i.e. 1.1.1.1/32 or 'any'
    :param int duration: length of time to blacklist
    :param int src_port1: start source port to limit blacklist
    :param int src_port2: end source port to limit blacklist
    :param str src_proto: source protocol, 'predefined_tcp' or
        'predefined_udp' (default: 'predefined_tcp')
    :param int dst_port1: start dst port to limit blacklist
    :param int dst_port2: end dst port to limit blacklist
    :param str dst_proto: dst protocol, 'predefined_tcp' or
        'predefined_udp' (default: 'predefined_tcp')

    .. note:: if blocking a range of ports, use both src_port1 and
        src_port2, otherwise providing only src_port1 is adequate. The
        same applies to dst_port1/dst_port2. If you provide src_portX
        but not dst_portX (or vice versa), the undefined port side
        defaults to all ports.
    """
    # BUG FIX: the original built {src: 'end_point1', dst: 'end_point2'}.
    # A dict keyed by address silently collapses to one entry when
    # src == dst, losing end_point1 entirely. Iterate explicit pairs.
    # (Local renamed from `json` to avoid shadowing the json module.)
    entry = {}
    for address, key in ((src, 'end_point1'), (dst, 'end_point2')):
        if 'any' in address.lower():
            entry[key] = {'address_mode': 'any'}
        else:
            entry[key] = {'address_mode': 'address', 'ip_network': address}
    if src_port1:
        # Direct indexing: the key is always present (set just above);
        # setdefault with no default could have returned None.
        entry['end_point1'].update(port1=src_port1, port2=src_port2 or src_port1, port_mode=src_proto)
    if dst_port1:
        entry['end_point2'].update(port1=dst_port1, port2=dst_port2 or dst_port1, port_mode=dst_proto)
    entry.update(duration=duration)
    return entry
def handle_lines(self):
    """Assemble incoming data into per-line (CRLF-delimited) packets."""
    while "\r\n" in self.buffer:
        # Consume one complete line, leaving the remainder buffered.
        line, self.buffer = self.buffer.split("\r\n", 1)
        if valid_packet(line):
            self.handle_raw_packet(line)
        else:
            log.warning('dropping invalid data: %s', line)
def field(self, name):
    """Return the field on this struct with the given name.

    Will try to find this name on all ancestors if this struct extends
    another.

    If found, returns a dict with keys: 'name', 'comment', 'type', 'is_array'.
    If not found, returns None.

    :Parameters:
      name
        string name of field to lookup
    """
    # BUG FIX: dict.has_key() was removed in Python 3; `in` works in
    # both Python 2 and 3.
    if name in self.fields:
        return self.fields[name]
    elif self.extends:
        # Resolve and cache the parent struct lazily.
        if not self.parent:
            self.parent = self.contract.struct(self.extends)
        return self.parent.field(name)
    else:
        return None
def _read_header(self):
    """Fetch the header needed to initialize the dataset and apply it."""
    self._header = self.cdmrf.fetch_header()
    self.load_from_stream(self._header)
def _arithmetic_helper(a: "BitVecFunc", b: Union[BitVec, int], operation: Callable) -> "BitVecFunc":
    """Helper function for arithmetic operations on BitVecFuncs.

    :param a: The BitVecFunc to perform the operation on.
    :param b: A BitVec or int to perform the operation on.
    :param operation: The arithmetic operation to perform.
    :return: The resulting BitVecFunc
    """
    if isinstance(b, int):
        # Promote the plain int to a BitVec of matching width.
        b = BitVec(z3.BitVecVal(b, a.size()))
    raw = operation(a.raw, b.raw)
    union = a.annotations + b.annotations
    if isinstance(b, BitVecFunc):
        # TODO: Find better value to set input and name to in this case?
        return BitVecFunc(raw=raw, func_name=None, input_=None, annotations=union)
    return BitVecFunc(raw=raw, func_name=a.func_name, input_=a.input_, annotations=union)
def set_major(self):
    """Increment the project's major version (X.y.z -> (X+1).0.0)."""
    current = self.get_version()
    next_major = int(current.partition('.')[0]) + 1
    self.set_version(current, '%d.0.0' % next_major)
def rotatePolygon(polygon, theta, origin=None):
    """Rotate the given polygon around ``origin`` or, if not given, its
    center of mass.

    polygon -> np.array((x1, y1), (...))
    theta -> rotation clockwise in RADIAN
    origin -> [x, y] - if not given set to center of gravity

    NOTE(review): the original doc said "returns: None", but the rotated
    array IS returned — and the input array is also modified in place
    (the defensive copy below is commented out). With an integer-dtype
    polygon, results are truncated to integers on assignment.
    """
    if origin is None:
        # dtype=polygon.dtype means an int polygon gets an int centroid.
        origin = np.mean(polygon, axis=0, dtype=polygon.dtype)
    # polygon = polygon.copy()
    polygon -= origin
    for n, corner in enumerate(polygon):
        # Standard 2D rotation matrix applied per-vertex.
        polygon[n] = corner[0] * np.cos(theta) - corner[1] * np.sin(theta), corner[0] * np.sin(theta) + corner[1] * np.cos(theta)
    polygon += origin
    return polygon
def device_connect(device_id):
    """Force a connection attempt via HTTP GET.

    Responds with ``{"success": bool}``; success is False when the
    device id is unknown.
    """
    success = False
    if device_id in devices:
        devices[device_id].connect()
        success = True
    return jsonify(success=success)
def hide_routemap_holder_route_map_content_set_origin_origin_igp(self, **kwargs):
    """Build the NETCONF config element for ``route-map ... set origin
    origin-igp`` and dispatch it through the callback.

    Required kwargs: ``name``, ``action_rm``, ``instance``.
    Optional kwarg ``callback`` overrides ``self._callback``.
    (Auto Generated Code)
    """
    config = ET.Element("config")
    hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
    route_map = ET.SubElement(hide_routemap_holder, "route-map")
    name_key = ET.SubElement(route_map, "name")
    name_key.text = kwargs.pop('name')
    action_rm_key = ET.SubElement(route_map, "action-rm")
    action_rm_key.text = kwargs.pop('action_rm')
    instance_key = ET.SubElement(route_map, "instance")
    instance_key.text = kwargs.pop('instance')
    content = ET.SubElement(route_map, "content")
    # Renamed local from `set` to avoid shadowing the builtin.
    set_el = ET.SubElement(content, "set")
    origin = ET.SubElement(set_el, "origin")
    origin_igp = ET.SubElement(origin, "origin-igp")
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def fasta_from_biom(table, fasta_file_name):
    """Save sequences from a biom table to a fasta file.

    Parameters
    ----------
    table : biom.Table
        The biom table containing the sequences
    fasta_file_name : str
        Name of the fasta output file
    """
    logger = logging.getLogger(__name__)
    logger.debug('saving biom table sequences to fasta file %s' % fasta_file_name)
    with open(fasta_file_name, 'w') as out:
        # Observation ids double as the sequences themselves.
        out.writelines('>%s\n%s\n' % (seq, seq)
                       for seq in table.ids(axis='observation'))
    logger.info('saved biom table sequences to fasta file %s' % fasta_file_name)
def rev_after(self, rev: int) -> int:
    """Return the earliest future rev on which the value will change.

    Implicitly returns ``None`` when no future change is known.
    NOTE(review): reads the LAST element of ``_future`` — presumably
    ``seek`` keeps it ordered with the nearest future rev at the end;
    confirm against the ``seek`` implementation.
    """
    self.seek(rev)
    if self._future:
        return self._future[-1][0]
def add_to_loader_class(cls, loader_class=None, tag=None, **kwargs):  # type: (type(yaml.Loader), str, **str) -> YamlIncludeConstructor
    """Create an instance of the constructor and add it to a YAML `Loader` class.

    :param loader_class: The `Loader` class to add the constructor to.

        .. attention:: This parameter **SHOULD** be a **class type**, **NOT** an object.

        It's one of: :class:`yaml.BaseLoader`, :class:`yaml.UnSafeLoader`,
        :class:`yaml.SafeLoader`, :class:`yaml.Loader`,
        :class:`yaml.FullLoader`, or their ``C*`` variants.

        :default: ``None``:
            - When :mod:`pyyaml` 3.*: add to PyYAML's default `Loader`
            - When :mod:`pyyaml` 5.*: add to `FullLoader`
    :type loader_class: type
    :param str tag: Tag name of the include constructor.

        :default: ``""``: use :attr:`DEFAULT_TAG_NAME` as the tag name.
    :param kwargs: Arguments passed to the construct function
    :return: The newly created constructor object
    :rtype: YamlIncludeConstructor
    """
    if tag is None:
        tag = ''
    tag = tag.strip()
    if not tag:
        tag = cls.DEFAULT_TAG_NAME
    if not tag.startswith('!'):
        raise ValueError('`tag` argument should start with character "!"')
    instance = cls(**kwargs)
    if loader_class is None:
        # PyYAML 5.x exposes FullLoader; otherwise fall back to the
        # library default Loader (PyYAML 3.x).
        if FullLoader:
            yaml.add_constructor(tag, instance, FullLoader)
        else:
            yaml.add_constructor(tag, instance)
    else:
        yaml.add_constructor(tag, instance, loader_class)
    return instance
def setDragQuery(self, query):
    """Set the query that should be used when this record is dragged.

    The value is stored as 'application/x-orb-query' mime data
    (cleared when ``query`` is None).

    :param query: <orb.Query> || None
    """
    if query is not None:
        self.setDragData('application/x-orb-query', query.toXmlString())
    else:
        self.setDragData('application/x-orb-query', None)
def get_backend_init_list(backend_vals):
    """Turn a backend config dict into ``-backend-config key=val``
    command line items."""
    cmd_list = []
    for key, val in backend_vals.items():
        cmd_list.extend(['-backend-config', key + '=' + val])
    return cmd_list
def allow_ast_comparison():
    """This ugly little monkey-patcher adds in a helper class to all the
    AST node types. This helper class allows eq/ne comparisons to work,
    so that entire trees can be easily compared by Python's comparison
    machinery. Used by the anti8 functions to compare old and new ASTs.
    Could also be used by the test library.

    NOTE: mutates the ``ast`` module classes globally and is not
    reversible within the process.
    """
    class CompareHelper(object):
        def __eq__(self, other):
            return type(self) == type(other) and vars(self) == vars(other)

        def __ne__(self, other):
            return type(self) != type(other) or vars(self) != vars(other)

    for item in vars(ast).values():
        if type(item) != type:
            continue
        if issubclass(item, ast.AST):
            try:
                # Splice the mixin into each AST node class's bases.
                item.__bases__ = tuple(list(item.__bases__) + [CompareHelper])
            except TypeError:
                # Some classes refuse base modification; skip them.
                pass
def _run_includemes ( configurator , includemes ) :
"""Automatically include packages defined in * * include * * configuration key .
: param pyramid . config . Configurator configurator : pyramid ' s app configurator
: param dict includemes : include , a list of includes or dictionary""" | for include in includemes :
if includemes [ include ] :
try :
configurator . include ( include , includemes [ include ] )
except AttributeError :
configurator . include ( include ) |
def exchange_code_for_token(self, authorization_code):
    # type: (str) -> se_leg_op.access_token.AccessToken
    """Exchange an authorization code for an access token.

    Raises InvalidAuthorizationCode when the code is unknown, already
    used, or expired; otherwise marks the code used and mints a token.
    """
    if authorization_code not in self.authorization_codes:
        raise InvalidAuthorizationCode('{} unknown'.format(authorization_code))
    authz_info = self.authorization_codes[authorization_code]
    if authz_info['used']:
        logger.debug('detected already used authz_code=%s', authorization_code)
        raise InvalidAuthorizationCode('{} has already been used'.format(authorization_code))
    elif authz_info['exp'] < int(time.time()):
        logger.debug('detected expired authz_code=%s, now=%s > exp=%s ', authorization_code, int(time.time()), authz_info['exp'])
        raise InvalidAuthorizationCode('{} has expired'.format(authorization_code))
    # Single-use: mark the code consumed before minting the token.
    authz_info['used'] = True
    access_token = self._create_access_token(authz_info['sub'], authz_info[self.KEY_AUTHORIZATION_REQUEST], authz_info['granted_scope'])
    logger.debug('authz_code=%s exchanged to access_token=%s', authorization_code, access_token.value)
    return access_token
def find_distinct(self, collection, key):
    """Search a collection for the distinct key values provided.

    Args:
        collection: The db collection. See main class documentation.
        key: The name of the key to find distinct values. For example
            with the indicators collection, the key could be "type".

    Returns:
        List of distinct values.
    """
    return getattr(self.db, collection).distinct(key)
def resize(self, newWidth=0, newHeight=0):
    """Resize width and height of the rectangle.

    @param newWidth: new width value
    @param newHeight: new height value
    """
    self.width = newWidth
    self.height = newHeight
def chunks(iterable, n):
    """Yield successive ``n``-length slices of a sequence.

    Input: iterable - the sequence to split (must support len() and
           slicing); n - chunk size.
    Yields: slices of length ``n``; the final slice may be shorter.
    """
    # range() suffices here: np.arange built a whole index array and
    # dragged in a numpy dependency for no benefit.
    for i in range(0, len(iterable), n):
        yield iterable[i:i + n]
def _map_arguments(self, args):
    """Map from the top-level arguments to the arguments provided to
    the individual links.

    Builds one energy-selection link per energy bin in the component
    file, then per-event-class / per-PSF-type selection and binning
    links. Returns early (doing nothing) when required arguments are
    null.
    """
    comp_file = args.get('comp', None)
    datafile = args.get('data', None)
    if is_null(comp_file):
        return
    if is_null(datafile):
        return
    NAME_FACTORY.update_base_dict(datafile)
    outdir = args.get('outdir', None)
    outkey = args.get('outkey', None)
    ft1file = args['ft1file']
    if is_null(outdir) or is_null(outkey):
        return
    pfiles = os.path.join(outdir, outkey)
    self.comp_dict = yaml.safe_load(open(comp_file))
    coordsys = self.comp_dict.pop('coordsys')
    full_out_dir = make_nfs_path(os.path.join(outdir, outkey))
    for key_e, comp_e in sorted(self.comp_dict.items()):
        # Energy bounds are stored as log10 in the component file.
        emin = math.pow(10., comp_e['log_emin'])
        emax = math.pow(10., comp_e['log_emax'])
        enumbins = comp_e['enumbins']
        zmax = comp_e['zmax']
        zcut = "zmax%i" % comp_e['zmax']
        evclassstr = NAME_FACTORY.base_dict['evclass']
        kwargs_select = dict(zcut=zcut, ebin=key_e, psftype='ALL', coordsys=coordsys, mktime='none')
        selectfile_energy = make_full_path(outdir, outkey, NAME_FACTORY.select(**kwargs_select))
        linkname = 'select-energy-%s-%s' % (key_e, zcut)
        self._set_link(linkname, Gtlink_select, infile=ft1file, outfile=selectfile_energy, zmax=zmax, emin=emin, emax=emax, evclass=NAME_FACTORY.evclassmask(evclassstr), pfiles=pfiles, logfile=os.path.join(full_out_dir, "%s.log" % linkname))
        if 'evtclasses' in comp_e:
            evtclasslist_vals = comp_e['evtclasses']
        else:
            evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']]
        for evtclassval in evtclasslist_vals:
            for psf_type, psf_dict in sorted(comp_e['psf_types'].items()):
                linkname_select = 'select-type-%s-%s-%s-%s' % (key_e, zcut, evtclassval, psf_type)
                linkname_bin = 'bin-%s-%s-%s-%s' % (key_e, zcut, evtclassval, psf_type)
                hpx_order = psf_dict['hpx_order']
                kwargs_bin = kwargs_select.copy()
                kwargs_bin['psftype'] = psf_type
                selectfile_psf = make_full_path(outdir, outkey, NAME_FACTORY.select(**kwargs_bin))
                binfile = make_full_path(outdir, outkey, NAME_FACTORY.ccube(**kwargs_bin))
                self._set_link(linkname_select, Gtlink_select, infile=selectfile_energy, outfile=selectfile_psf, zmax=zmax, emin=emin, emax=emax, evtype=EVT_TYPE_DICT[psf_type], evclass=NAME_FACTORY.evclassmask(evtclassval), pfiles=pfiles, logfile=os.path.join(full_out_dir, "%s.log" % linkname_select))
                self._set_link(linkname_bin, Gtlink_bin, coordsys=coordsys, hpx_order=hpx_order, evfile=selectfile_psf, outfile=binfile, emin=emin, emax=emax, enumbins=enumbins, pfiles=pfiles, logfile=os.path.join(full_out_dir, "%s.log" % linkname_bin))
def start(self):
    """Start the instance, updating local state from the API response."""
    rs = self.connection.start_instances([self.id])
    if len(rs) > 0:
        self._update(rs[0])
def _GetCh(self):
    """Read a single character from the user.

    Returns:
      A string, the character read.
    """
    fd = self._tty.fileno()
    old = termios.tcgetattr(fd)
    try:
        # Raw mode: deliver keys immediately, without echo.
        tty.setraw(fd)
        ch = self._tty.read(1)
        # Also support arrow key shortcuts (escape + 2 chars)
        if ord(ch) == 27:
            ch += self._tty.read(2)
    finally:
        # Always restore the previous terminal settings.
        termios.tcsetattr(fd, termios.TCSADRAIN, old)
    return ch
def _start_services(self, console_env):
    """Overrides superclass.

    Loads the snippet on the device and exposes it to the console
    environment under both 'snippet' and the short alias 's'.
    """
    self._ad.load_snippet(name='snippet', package=self._package)
    console_env['snippet'] = self._ad.snippet
    console_env['s'] = self._ad.snippet
def get_product_historic_rates(self, product_id, start=None, end=None, granularity=None):
    """Historic rates for a product.

    Rates are returned in grouped buckets based on the requested
    ``granularity``. If start, end, and granularity aren't provided,
    the exchange will assume some (currently unknown) default values.

    Historical rate data may be incomplete. No data is published for
    intervals where there are no ticks.

    **Caution**: Historical rates should not be polled frequently. If
    you need real-time information, use the trade and book endpoints
    along with the websocket feed. The maximum number of data points
    for a single request is 200 candles; larger selections are
    rejected and must be split across multiple requests.

    Args:
        product_id (str): Product
        start (Optional[str]): Start time in ISO 8601
        end (Optional[str]): End time in ISO 8601
        granularity (Optional[int]): Desired time slice in seconds

    Returns:
        list: Historic candle data. Example:
            [[time, low, high, open, close, volume],
             [1415398768, 0.32, 4.2, 0.35, 4.2, 12.3], ...]
    """
    params = {key: value
              for key, value in (('start', start), ('end', end))
              if value is not None}
    if granularity is not None:
        acceptedGrans = [60, 300, 900, 3600, 21600, 86400]
        if granularity not in acceptedGrans:
            raise ValueError('Specified granularity is {}, must be in approved values: {}'.format(granularity, acceptedGrans))
        params['granularity'] = granularity
    return self._send_message('get', '/products/{}/candles'.format(product_id), params=params)
def cmd(*args, **kwargs):
    """Decorate a callable to replace it with a manufactured command class.

    Extends the interface of ``CommandDecorator``, allowing the same
    ``cmd`` to be used as a decorator or as a decorator factory::

        @cmd(root=True)
        def build():
            ...

        @build.register
        @cmd
        def deploy():
            ...

    Further enables composition of configuration, for example via
    partials, as helpers.
    """
    try:
        (first, *remainder) = args
    except ValueError:
        # No positional arguments: act purely as a decorator factory.
        pass
    else:
        if callable(first):
            # Used directly as @cmd around a callable.
            return CommandDecorator(*remainder, **kwargs)(first)
    return CommandDecorator(*args, **kwargs)
def make_data_classif(dataset, n, nz=.5, theta=0, random_state=None, **kwargs):
    """Dataset generation for classification problems.

    Parameters
    ----------
    dataset : str
        type of classification problem (see code)
    n : int
        number of training samples
    nz : float
        noise level (>0)
    theta : float
        rotation angle in radians (used by the 'gaussrot' dataset only)
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : np.array (n, d)
        n observation of size d
    y : np.array (n,)
        labels of the samples
    """
    generator = check_random_state(random_state)
    if dataset.lower() == '3gauss':
        # Assign roughly n/3 samples to each of the 3 classes.
        y = np.floor((np.arange(n) * 1.0 / n * 3)) + 1
        x = np.zeros((n, 2))
        # class 1
        x[y == 1, 0] = -1.
        x[y == 1, 1] = -1.
        x[y == 2, 0] = -1.
        x[y == 2, 1] = 1.
        x[y == 3, 0] = 1.
        x[y == 3, 1] = 0
        x[y != 3, :] += 1.5 * nz * generator.randn(sum(y != 3), 2)
        x[y == 3, :] += 2 * nz * generator.randn(sum(y == 3), 2)
    elif dataset.lower() == '3gauss2':
        y = np.floor((np.arange(n) * 1.0 / n * 3)) + 1
        x = np.zeros((n, 2))
        y[y == 4] = 3
        # class 1
        x[y == 1, 0] = -2.
        x[y == 1, 1] = -2.
        x[y == 2, 0] = -2.
        x[y == 2, 1] = 2.
        x[y == 3, 0] = 2.
        x[y == 3, 1] = 0
        x[y != 3, :] += nz * generator.randn(sum(y != 3), 2)
        x[y == 3, :] += 2 * nz * generator.randn(sum(y == 3), 2)
    elif dataset.lower() == 'gaussrot':
        # Two gaussian blobs rotated by theta (clockwise rotation matrix).
        rot = np.array([[np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]])
        m1 = np.array([-1, 1])
        m2 = np.array([1, -1])
        y = np.floor((np.arange(n) * 1.0 / n * 2)) + 1
        n1 = np.sum(y == 1)
        n2 = np.sum(y == 2)
        x = np.zeros((n, 2))
        x[y == 1, :] = get_2D_samples_gauss(n1, m1, nz, random_state=generator)
        x[y == 2, :] = get_2D_samples_gauss(n2, m2, nz, random_state=generator)
        x = x.dot(rot)
    else:
        # Unknown dataset name: return scalar placeholders.
        x = np.array(0)
        y = np.array(0)
        print("unknown dataset")
    return x, y.astype(int)
def get_automated_runs():
    """Return all automated runs (GET) or create and enqueue one (POST).

    NOTE(review): request routing/method registration happens outside
    this view; structure inferred from the flask idioms used here.
    """
    path = functions.get_path_from_query_string(request)
    if request.method == 'GET':
        with functions.DBContextManager(path) as session:
            automated_runs = session.query(models.AutomatedRun).all()
            return jsonify(list(map(lambda x: x.serialize, automated_runs)))
    if request.method == 'POST':
        req_body = request.get_json()
        with functions.DBContextManager(path) as session:
            base_learner_origin = None
            if req_body['category'] == 'bayes' or req_body['category'] == 'greedy_ensemble_search':
                base_learner_origin = session.query(models.BaseLearnerOrigin).filter_by(id=req_body['base_learner_origin_id']).first()
                if base_learner_origin is None:
                    raise exceptions.UserError('Base learner origin {} not found'.format(req_body['base_learner_origin_id']), 404)
                if not base_learner_origin.final:
                    raise exceptions.UserError('Base learner origin {} is not final'.format(req_body['base_learner_origin_id']))
            elif req_body['category'] == 'tpot':
                pass
            else:
                raise exceptions.UserError('Automated run category' ' {} not recognized'.format(req_body['category']))
            # Check for any syntax errors
            module = functions.import_string_code_as_module(req_body['source'])
            del module
            automated_run = models.AutomatedRun(req_body['source'], 'queued', req_body['category'], base_learner_origin)
            session.add(automated_run)
            session.commit()
            # Enqueue the run for background execution via RQ.
            with Connection(get_redis_connection()):
                rqtasks.start_automated_run.delay(path, automated_run.id)
            return jsonify(automated_run.serialize)
def general_acquisition_info(metadata):
    """General sentence on data acquisition. Should be the first sentence
    in the MRI data acquisition section.

    Parameters
    ----------
    metadata : :obj:`dict`
        The metadata for the dataset.

    Returns
    -------
    out_str : :obj:`str`
        Output string with scanner information.
    """
    template = ('MR data were acquired using a {tesla}-Tesla {manu} {model} '
                'MRI scanner.')
    return template.format(
        tesla=metadata.get('MagneticFieldStrength', 'UNKNOWN'),
        manu=metadata.get('Manufacturer', 'MANUFACTURER'),
        model=metadata.get('ManufacturersModelName', 'MODEL'),
    )
def _parse(self):
    """Parse the raw data by running ``_parse_row`` over every row index."""
    for row_index in range(len(self.data)):
        self._parse_row(row_index)
def evaluate_cartesian_multi(self, param_vals, _verify=True):
    r"""Evaluate the surface at multiple Cartesian parameter pairs.

    Each row of ``param_vals`` is an ``(s, t)`` pair evaluated exactly as
    in :meth:`evaluate_cartesian`.

    Args:
        param_vals (numpy.ndarray): ``N x 2`` array of parameter values.
        _verify (Optional[bool]): When :data:`True` (the default), check
            that ``param_vals`` is a 2D array and verify every ``(s, t)``
            pair via :meth:`evaluate_cartesian`'s verification.

    Returns:
        numpy.ndarray: The points on the surface.

    Raises:
        ValueError: If ``param_vals`` is not a 2D array and
            ``_verify=True``.
    """
    if _verify:
        if param_vals.ndim != 2:
            raise ValueError("Parameter values must be 2D array")
        for s_val, t_val in param_vals:
            self._verify_cartesian(s_val, t_val)
    return _surface_helpers.evaluate_cartesian_multi(
        self._nodes, self._degree, param_vals, self._dimension)
def request_timeout_hint(timeout_hint):
    """Decorator factory: attach a recommended client timeout hint to a request.

    Useful for requests that take longer than average to reply.  The hint is
    provided to clients via ?request-timeout-hint.  Note this is only exposed
    if the device server sets the protocol version to KATCP v5.1 or higher and
    enables the REQUEST_TIMEOUT_HINTS flag in its PROTOCOL_INFO class
    attribute.

    Parameters
    ----------
    timeout_hint : float (seconds) or None
        How long the decorated request should reasonably take to reply.
        None means no timeout hint, similar to never using the decorator,
        provided for consistency.

    Examples
    --------
    >>> class MyDevice(DeviceServer):
    ...     @return_reply(Int())
    ...     @request_timeout_hint(15)  # Set request timeout hint to 15 seconds
    ...     @tornado.gen.coroutine
    ...     def request_myreq(self, req):
    ...         '''A slow request'''
    ...         result = yield self.slow_operation()
    ...         raise tornado.gen.Return((req, result))
    """
    hint = None if timeout_hint is None else float(timeout_hint)

    def decorator(handler):
        handler.request_timeout_hint = hint
        return handler

    return decorator
def find_xenon_grpc_jar():
    """Locate the Xenon-GRPC jar-file under the Python prefix.

    Searches ``<prefix>/lib`` and ``<prefix>/local/lib`` and returns the
    path of the first matching jar as a string, or None when absent.
    """
    prefix = Path(sys.prefix)
    jar_name = 'xenon-grpc-{}-all.jar'.format(xenon_grpc_version)
    for directory in (prefix / 'lib', prefix / 'local' / 'lib'):
        candidate = directory / jar_name
        if candidate.exists():
            return str(candidate)
    return None
def put(self, key, value, minutes):
    """Store an item in the cache for a given number of minutes.

    :param key: The cache key
    :type key: str

    :param value: The cache value
    :type value: mixed

    :param minutes: The lifetime in minutes of the cached value
    :type minutes: int
    """
    lifetime_seconds = minutes * 60
    self._memcache.set(self._prefix + key, value, lifetime_seconds)
def plot_ts(ax, agemin, agemax, timescale='gts12', ylabel="Age (Ma)"):
    """Make a time scale plot between specified ages.

    Parameters
    ----------
    ax : matplotlib axis object to draw the polarity column on
    agemin : minimum age for timescale (Ma)
    agemax : maximum age for timescale (Ma)
    timescale : Time Scale [default is Gradstein et al., (2012)];
        for other options see pmag.get_ts()
    ylabel : if set, plot as ylabel
    """
    ax.set_title(timescale.upper())
    # fixed x range; y runs from agemax at the bottom to agemin at the top
    ax.axis([-.25, 1.5, agemax, agemin])
    ax.axes.get_xaxis().set_visible(False)
    # get dates and chron names for timescale
    TS, Chrons = pmag.get_ts(timescale)
    X, Y, Y2 = [0, 1], [], []
    cnt = 0
    if agemin < TS[1]:  # in the Brunhes
        Y = [agemin, agemin]
        # minimum age
        Y1 = [TS[1], TS[1]]
        # age of the B/M boundary
        ax.fill_between(X, Y, Y1, facecolor='black')
        # color in Brunhes, black
    for d in TS[1:]:
        # alternate polarity: every other interval is filled black
        pol = cnt % 2
        cnt += 1
        if d <= agemax and d >= agemin:
            ind = TS.index(d)
            Y = [TS[ind], TS[ind]]
            Y1 = [TS[ind + 1], TS[ind + 1]]
            if pol:  # fill in every other time
                ax.fill_between(X, Y, Y1, facecolor='black')
    # bounding box around the whole polarity column
    ax.plot([0, 1, 1, 0, 0], [agemin, agemin, agemax, agemax, agemin], 'k-')
    plt.yticks(np.arange(agemin, agemax + 1, 1))
    if ylabel != "":
        ax.set_ylabel(ylabel)
    # second (invisible) axis used only for chron-name annotations
    ax2 = ax.twinx()
    ax2.axis('off')
    for k in range(len(Chrons) - 1):
        c = Chrons[k]
        cnext = Chrons[k + 1]
        # place the label one third of the way into the interval
        d = cnext[1] - (cnext[1] - c[1]) / 3.
        if d >= agemin and d < agemax:  # make the Chron boundary tick
            ax2.plot([1, 1.5], [c[1], c[1]], 'k-')
            ax2.text(1.05, d, c[0])
    ax2.axis([-.25, 1.5, agemax, agemin])
def task_transaction(channel):
    """Ensure a task is fetched and acknowledged atomically.

    Holds the channel lock while receiving a task and sending back an
    ``Acknowledgement`` carrying this process's PID and the task id, so
    no other worker can interleave between fetch and ack.

    :param channel: shared worker channel; must expose ``lock``,
        ``poll``, ``recv`` and ``send``.
    :returns: the task received from the channel.
    :raises RuntimeError: if no task is ready despite this worker being
        woken up (a race condition between workers).
    """
    with channel.lock:
        # poll(0): non-blocking check that a task is actually available
        if channel.poll(0):
            task = channel.recv()
            channel.send(Acknowledgement(os.getpid(), task.id))
        else:
            raise RuntimeError("Race condition between workers")
    return task
def nginx_access(line):
    '''Parse one nginx JSON access-log line into a metric event dict.

    The input is a JSON object with fields such as remote_addr, timestamp
    (epoch seconds as a string), request, status, request_time,
    body_bytes_sent and upstream_response_time.

    Returns a dict with keys:
        'timestamp' -- ISO-8601 UTC string derived from the epoch timestamp,
        'data'      -- the parsed log with numeric fields coerced to float
                       and 'upstream_response_time' forced to 0.0 when nginx
                       logged "-" (no upstream),
        'type'      -- always 'metric',
        'event'     -- always 'nginx_event'.
    '''
    # TODO Handle nginx error logs
    log = json.loads(line)
    # Convert the epoch-seconds timestamp into an ISO-8601 UTC string.
    timestamp_iso = datetime.datetime.utcfromtimestamp(
        float(log['timestamp'])).isoformat()
    log.update({'timestamp': timestamp_iso})
    # nginx writes "-" when there was no upstream; map it to 0.0 so the
    # field is always numeric.  Defaulting the lookup to '-' fixes a
    # TypeError the old code raised when the key was absent
    # (`'-' in None`); str() keeps the substring match for multi-upstream
    # values like "0.004, -".
    if '-' in str(log.get('upstream_response_time', '-')):
        log['upstream_response_time'] = 0.0
    log['body_bytes_sent'] = float(log['body_bytes_sent'])
    log['request_time'] = float(log['request_time'])
    log['upstream_response_time'] = float(log['upstream_response_time'])
    return dict(
        timestamp=log.get('timestamp', ' '),
        data=log,
        type='metric',
        event='nginx_event',
    )
def get_filter_qobj(self, keys=None):
    """Return a copy of this Query object with additional where clauses for
    the keys in the argument.

    :param keys: optional iterable of key values to restrict the query to;
        a ``None`` entry produces an ``is null`` clause, other values an
        ``in %s`` clause.  When falsy, a single ``= %s`` clause is added.
    :returns: a new ``Query`` selecting only the aggregate columns, with
        the extra where clauses appended.
    """
    # only care about columns in aggregates right?
    cols = set()
    for agg in self.select.aggregates:
        cols.update(agg.cols)
    sels = [SelectExpr(col, [col], col, None) for col in cols]
    select = Select(sels)
    where = list(self.where)
    if keys:
        # Materialize to a list: on Python 3 `map` returns a one-shot
        # iterator, and the old code exhausted it with `None in keys`
        # before iterating it again in the comprehension below.
        keys = [sqlize(k) for k in keys]
        expr = self.select.nonaggs[0].expr
        clause = []
        if None in keys:
            clause.append("%s is null" % expr)
        if len([k for k in keys if k is not None]) > 0:
            clause.append("%s in %%s" % expr)
        clause = " or ".join(clause)
        where.append(clause)
    else:
        where.append('%s = %%s' % (self.select.nonaggs[0].expr))
    q = Query(self.db, select, self.fr, where)
    return q
def _visible_width(s):
    """Visible width of a printed string; ANSI color codes are removed.

    >>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
    (5, 5)
    """
    if isinstance(s, (_text_type, _binary_type)):
        return len(_strip_invisible(s))
    return len(_text_type(s))
def acor(self, k=5):
    """Autocorrelation time of the chain.

    Parameters
    ----------
    k : int
        Window parameter forwarded to ``acor.acor``.

    Returns
    -------
    numpy.ndarray or int
        Autocorrelation time for each parameter of ``self._chain``, or 0
        when the optional ``acor`` package is not installed.
    """
    try:
        import acor
    except ImportError:
        print("Can't import acor, please download it.")
        return 0
    n = np.shape(self._chain)[1]
    t = np.zeros(n)
    # `xrange` was Python-2-only and raised NameError on Python 3;
    # `range` behaves identically here.
    for i in range(n):
        t[i] = acor.acor(self._chain[:, i], k)[0]
    return t
def lon_to_deg(lon):
    """Convert a longitude value to decimal degrees.

    Strings containing ':' are treated as sexagesimal and converted via
    ``hmsStrToDeg``; anything else is coerced with ``float``.
    """
    # TODO: handle other coordinate systems
    if isinstance(lon, str) and ':' in lon:
        return hmsStrToDeg(lon)
    return float(lon)
def VAR_DECL(self, cursor):
    """Handle a variable declaration cursor.

    Builds a ``typedesc.Variable`` for the declaration (or fetches the
    previously registered one), records its source location and comment,
    and returns True.

    :param cursor: the cursor for the VAR_DECL node.
    :returns: the already-registered object when the unique name was seen
        before, otherwise True after registering the new variable.
    """
    # get the name
    name = self.get_unique_name(cursor)
    log.debug('VAR_DECL: name: %s', name)
    # Check for a previous declaration in the register
    if self.is_registered(name):
        return self.get_registered(name)
    # get the typedesc object
    _type = self._VAR_DECL_type(cursor)
    # transform the ctypes values into ctypeslib
    init_value = self._VAR_DECL_value(cursor, _type)
    # finished
    log.debug('VAR_DECL: _type:%s', _type.name)
    log.debug('VAR_DECL: _init:%s', init_value)
    log.debug('VAR_DECL: location:%s', getattr(cursor, 'location'))
    obj = self.register(name, typedesc.Variable(name, _type, init_value))
    # attach source location and any documentation comment to the object
    self.set_location(obj, cursor)
    self.set_comment(obj, cursor)
    return True
def mirtrace_rna_categories(self):
    """Generate the miRTrace RNA-categories bar graph."""
    # Fixed display order and colours for the possible RNA categories.
    category_styles = [
        ('reads_mirna', '#33a02c', 'miRNA'),
        ('reads_rrna', '#ff7f00', 'rRNA'),
        ('reads_trna', '#1f78b4', 'tRNA'),
        ('reads_artifact', '#fb9a99', 'Artifact'),
        ('reads_unknown', '#d9d9d9', 'Unknown'),
    ]
    keys = OrderedDict()
    for key, colour, label in category_styles:
        keys[key] = {'color': colour, 'name': label}
    # Config for the plot
    config = {
        'id': 'mirtrace_rna_categories_plot',
        'title': 'miRTrace: RNA Categories',
        'ylab': '# Reads',
        'cpswitch_counts_label': 'Number of Reads',
    }
    return bargraph.plot(self.summary_data, keys, config)
def snpsift(self):
    """Run SnpSift on ``self.vcf_file`` and time the run.

    :returns: the wall-clock execution time as a ``datetime.timedelta``.
        (The duration was previously computed but discarded; returning it
        is backward compatible since the method used to return None.)
    """
    tstart = datetime.now()
    ss = snpsift.SnpSift(self.vcf_file)
    ss.run()
    tend = datetime.now()
    execution_time = tend - tstart
    return execution_time
def delete_service(self, service_id):
    """Delete the service identified by ``service_id``.

    :returns: the status extracted from the API response.
    """
    response = self._fetch("/service/%s" % service_id, method="DELETE")
    return self._status(response)
def read(self, url):
    """Read the blob stored at ``url`` and return its decoded lines.

    :param url: storage URL carrying account, container, blob and
        SAS-token components (parsed by ``_split_url``).
    :returns: list of the blob's lines decoded as UTF-8.
    """
    params = self._split_url(url)
    buffer = io.BytesIO()
    service = self._block_blob_service(
        account_name=params["account"], sas_token=params["sas_token"])
    service.get_blob_to_stream(
        container_name=params["container"],
        blob_name=params["blob"],
        stream=buffer)
    buffer.seek(0)
    return [raw_line.decode("utf-8") for raw_line in buffer]
def v1_tag_associate(request, tags, tag):
    '''Associate an HTML element with a tag.

    The association is read as a JSON-serialized object from the request
    body.  Example structure:

    .. code-block:: python

        "url": "http://example.com/abc/xyz?foo=bar",
        "text": "The text the user highlighted.",
        "stream_id": "{unix timestamp}-{md5 of url}",
        "hash": "{nilsimsa hash of the HTML}",
        "timestamp": {unix timestamp},
        "xpath": {
            "start_node": "/html/body/p[1]/text()[2]",
            "start_idx": 3,
            "end_node": "/html/body/p[1]/text()[3]",
            "end_idx": 9
        }

    All fields are required and cannot be empty or ``null``.
    The tag of the association should be specified in the URL
    and is delimited by ``//``.
    '''
    # the tag arrives as raw bytes from the URL; normalize to unicode
    tag = tag.decode('utf-8').strip()
    # merge the tag into the posted association before storing it
    assoc = dict(json.loads(request.body.read()), **{'tag': tag})
    tags.add(assoc)
def plot_series(filename, plot_kwargs=None):
    '''Plot series data from a MonitorSeries output text file.

    Args:
        filename (str): Path to *.series.txt file produced by the
            :obj:`~nnabla.MonitorSeries` class.
        plot_kwargs (dict, optional): Keyword arguments passed to
            :function:`matplotlib.pyplot.plot`.

    Note:
        matplotlib package is required.
    '''
    import matplotlib.pyplot as plt
    if plot_kwargs is None:
        plot_kwargs = {}
    series = np.genfromtxt(filename, dtype='i8,f4', names=['k', 'v'])
    plt.plot(series['k'], series['v'], **plot_kwargs)
def tag_release(message):
    # type: (str) -> None
    # NOTE: the previous type comment claimed (str, bool) -> None, but the
    # function takes a single message argument.
    """Tag the current commit as the current version release.

    This should be the same commit as the one that's uploaded as the
    release (to pypi for example).

    :param message: the tag message.

    **Example Config**::

        version_file: 'src/mypkg/__init__.py'

    Examples::

        $ peltak release tag    # Tag the current commit as release
    """
    # imported here, at call time, rather than at module load
    from peltak.extra.gitflow import logic
    logic.release.tag(message)
def remove_tab(self, index):
    """Override removeTab to emit tab_closed and last_tab_closed signals.

    :param index: index of the tab to remove.
    """
    widget = self.widget(index)
    try:
        document = widget.document()
    except AttributeError:
        document = None
    # not a QPlainTextEdit
    # close the widget's clones first and keep the survivors
    clones = self._close_widget(widget)
    self.tab_closed.emit(widget)
    self.removeTab(index)
    self._restore_original(clones)
    # detach the widget from its original tab widget's bookkeeping
    widget._original_tab_widget._tabs.remove(widget)
    if self.count() == 0:
        self.last_tab_closed.emit()
    # forget the context-menu target if it was this tab
    if SplittableTabWidget.tab_under_menu == widget:
        SplittableTabWidget.tab_under_menu = None
    if not clones:
        widget.setParent(None)
    else:
        # hand the text document over to the first surviving clone
        try:
            clones[0].syntax_highlighter.setDocument(document)
        except AttributeError:
            # clone has no syntax highlighter
            pass
def publish_model(args: argparse.Namespace, backend: StorageBackend, log: logging.Logger):
    """Push the model to Google Cloud Storage and update the index file.

    :param args: :class:`argparse.Namespace` with "model", "backend",
        "args", "force", "meta", "update_default", "username", "password",
        "remote_repo", "template_model", "template_readme" and "log_level".
    :param backend: Backend which is responsible for working with model files.
    :param log: Logger supplied by supply_backend.
    :return: None if successful, 1 otherwise.
    """
    path = os.path.abspath(args.model)
    try:
        # dummy=True: only metadata is needed here, skip loading the payload
        model = GenericModel(source=path, dummy=True)
    except ValueError as e:
        log.critical('"model" must be a path: %s', e)
        return 1
    except Exception as e:
        log.critical("Failed to load the model: %s: %s" % (type(e).__name__, e))
        return 1
    base_meta = model.meta
    try:
        model_url = backend.upload_model(path, base_meta, args.force)
    except ModelAlreadyExistsError:
        return 1
    log.info("Uploaded as %s", model_url)
    # merge the user-supplied metadata file into the model's own meta
    with open(os.path.join(args.meta), encoding="utf-8") as _in:
        extra_meta = json.load(_in)
    model_type, model_uuid = base_meta["model"], base_meta["uuid"]
    meta = extract_model_meta(base_meta, extra_meta, model_url)
    log.info("Updating the models index...")
    try:
        template_model = backend.index.load_template(args.template_model)
        template_readme = backend.index.load_template(args.template_readme)
    except ValueError:
        return 1
    backend.index.add_model(model_type, model_uuid, meta, template_model, args.update_default)
    backend.index.update_readme(template_readme)
    try:
        backend.index.upload("add", {"model": model_type, "uuid": model_uuid})
    except ValueError:  # TODO: replace with PorcelainError, see related TODO in index.py:181
        return 1
    log.info("Successfully published.")
def concordance_index(event_times, predicted_scores, event_observed=None):
    """Calculate the concordance index (C-index) between two series of
    event times.

    The first series holds the observed survival times from the
    experimental data, the second the survival times (or hazards, etc.)
    predicted by some model.  The c-index is the average of how often the
    model ranks X above Y when X is indeed greater than Y in the observed
    data, with censored values handled appropriately:

    - 0.5 is the expected result from random predictions,
    - 1.0 is perfect concordance and,
    - 0.0 is perfect anti-concordance (multiply predictions with -1 to get 1.0)

    Parameters
    ----------
    event_times : iterable
        a length-n iterable of observed survival times.
    predicted_scores : iterable
        a length-n iterable of predicted scores - these could be survival
        times, or hazards, etc.
    event_observed : iterable, optional
        a length-n iterable of censorship flags, 1 if observed, 0 if not.
        Default None assumes all observed.

    Returns
    -------
    float
        the c-index, a value between 0 and 1.

    References
    ----------
    Harrell FE, Lee KL, Mark DB. Multivariable prognostic models: issues in
    developing models, evaluating assumptions and adequacy, and measuring
    and reducing errors. Statistics in Medicine 1996;15(4):361-87.
    """
    def _as_flat(values):
        # Accept (n,), (n, 1) and (1, n) inputs alike.
        arr = np.asarray(values, dtype=float)
        if arr.ndim == 2 and (arr.shape[0] == 1 or arr.shape[1] == 1):
            arr = arr.ravel()
        return arr

    event_times = _as_flat(event_times)
    predicted_scores = _as_flat(predicted_scores)
    if event_times.shape != predicted_scores.shape:
        raise ValueError("Event times and predictions must have the same shape")
    if event_times.ndim != 1:
        raise ValueError("Event times can only be 1-dimensional: (n,)")
    if event_observed is None:
        event_observed = np.ones(event_times.shape[0], dtype=float)
    else:
        event_observed = np.asarray(event_observed, dtype=float).ravel()
        if event_observed.shape != event_times.shape:
            raise ValueError("Observed events must be 1-dimensional of same length as event times")
    num_correct, num_tied, num_pairs = _concordance_summary_statistics(
        event_times, predicted_scores, event_observed)
    return _concordance_ratio(num_correct, num_tied, num_pairs)
def solution(self, b):
    """Check whether an integer is a solution of the current strided interval.

    :param b: integer to check
    :return: True if b belongs to the current StridedInterval, False otherwise
    :raises ClaripyOperationError: if ``b`` is not a plain number.
    """
    if not isinstance(b, numbers.Number):
        # The original message here was a garbled concatenation; only
        # plain numbers are accepted, not StridedInterval instances.
        raise ClaripyOperationError(
            'Strided intervals cannot be passed as a parameter to '
            'solution(); pass a plain number instead.')
    # b is a solution iff the singleton interval [b, b] overlaps this one
    b = StridedInterval(lower_bound=b, upper_bound=b, stride=0, bits=self.bits)
    return not self.intersection(b).is_empty
def __generate_string(length):  # pragma: no cover
    """Generate ``length`` random alphanumeric characters, UTF-8 encoded,
    for password creation."""
    alphabet = string.ascii_letters + string.digits
    rng = SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(length)).encode()
def create_namespaced_replica_set(self, namespace, body, **kwargs):  # noqa: E501
    """create_namespaced_replica_set  # noqa: E501

    create a ReplicaSet  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_namespaced_replica_set(namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1beta1ReplicaSet body: (required)
    :param bool include_uninitialized: If true, partially initialized resources are included in the response.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V1beta1ReplicaSet
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always request the payload only; status/headers are not exposed here.
    kwargs['_return_http_data_only'] = True
    result = self.create_namespaced_replica_set_with_http_info(
        namespace, body, **kwargs)  # noqa: E501
    if kwargs.get('async_req'):
        # asynchronous call: hand back the request thread untouched
        return result
    # synchronous call: result already holds just the data
    (data) = result
    return data
def splitext(p):
    r"""Like the normal splitext (in posixpath), but doesn't treat dotfiles
    (e.g. .emacs) as extensions.  Also uses os.sep instead of '/'."""
    root, ext = os.path.splitext(p)
    # A dotfile leaves an empty root or one ending in the separator;
    # fold the "extension" back into the name in that case.
    if not root or root.endswith(os.sep):
        return (root + ext, "")
    return root, ext
def syncLayerData(self, layerData=None):
    """Sync the layer information for this item from the given layer data.

    :param layerData: <dict> || None -- when None or empty, the data is
        re-read from the item's own layer.
    """
    if not self._layer:
        # item is not attached to a layer; nothing to sync
        return
    if not layerData:
        layerData = self._layer.layerData()
    self.setVisible(layerData.get('visible', True))
    if layerData.get('current'):  # set the default parameters
        # current layer: item is movable/selectable and drawn on top
        flags = self.ItemIsMovable
        flags |= self.ItemIsSelectable
        flags |= self.ItemIsFocusable
        # need this flag for Qt 4.6+
        try:
            flags |= self.ItemSendsGeometryChanges
        except AttributeError:
            pass
        self.setFlags(flags)
        self.setAcceptHoverEvents(True)
        self.setZValue(100)
    else:  # set the default parameters
        # non-current layer: focus only, stacked at the layer's z-value
        self.setFlags(self.ItemIsFocusable)
        self.setAcceptHoverEvents(True)
        self.setZValue(layerData.get('zValue', 0))
    self.update()
def isemhash_unbottleneck(x, hidden_size, isemhash_filter_size_multiplier=1.0):
    """Improved semantic hashing un-bottleneck.

    Maps bit values ``x`` back to a dense ``hidden_size`` representation
    through a small two-branch network.

    Args:
        x: Tensor of bit values (assumed in [-1, 1] -- TODO confirm against
            the matching bottleneck).
        hidden_size: int, size of the produced dense representation.
        isemhash_filter_size_multiplier: float multiplier for the hidden
            filter size relative to ``hidden_size``.
    """
    filter_size = int(hidden_size * isemhash_filter_size_multiplier)
    x = 0.5 * (x - 1.0)
    # Move from [-1, 1] to [-1, 0].  (The previous comment claimed
    # [0, 1], but 0.5 * (x - 1) maps 1 -> 0 and -1 -> -1.)
    with tf.variable_scope("isemhash_unbottleneck"):
        h1a = tf.layers.dense(x, filter_size, name="hidden1a")
        # second branch sees the complemented values
        h1b = tf.layers.dense(1.0 - x, filter_size, name="hidden1b")
        h2 = tf.layers.dense(tf.nn.relu(h1a + h1b), filter_size, name="hidden2")
        return tf.layers.dense(tf.nn.relu(h2), hidden_size, name="final")
def get_locales(self, package_name):
    """Retrieve a list of all available locales in a given package.

    :param package_name: the package name to get locales of
    """
    self._analyse()
    # iterating the mapping yields its keys, i.e. the locale names
    return [locale for locale in self.values[package_name]]
def lifted_gate(gate: Gate, n_qubits: int):
    """Lift a pyquil :py:class:`Gate` into the full ``n_qubits``-qubit
    Hilbert space.

    Looks up the matrix form of the gate and dispatches to
    :py:func:`lifted_gate_matrix` with the gate's target qubits.

    :param gate: A gate
    :param n_qubits: The total number of qubits.
    :return: A 2^n by 2^n lifted version of the gate acting on its
        specified qubits.
    """
    entry = QUANTUM_GATES[gate.name]
    # Parametric gates are stored as factories, fixed gates as matrices.
    matrix = entry(*gate.params) if len(gate.params) > 0 else entry
    target_inds = [q.index for q in gate.qubits]
    return lifted_gate_matrix(matrix=matrix, qubit_inds=target_inds,
                              n_qubits=n_qubits)
def set_value(self, value: datetime):
    """Set the current value.

    :param value: the new value; must be a ``datetime`` instance.

    NOTE(review): the ``assert`` guard is stripped when Python runs with
    -O; raise TypeError instead if validation must always happen.
    """
    assert isinstance(value, datetime)
    self.value = value
def user_remove_prj(self, *args, **kwargs):
    """Remove the selected project from the user.

    Reads the current selection from the project table view, removes the
    current user from that project's user list, and detaches the tree item.

    :returns: None
    :rtype: None
    :raises: None
    """
    if not self.cur_user:
        # no user selected; nothing to remove
        return
    i = self.user_prj_tablev.currentIndex()
    item = i.internalPointer()
    if item:
        prj = item.internal_data()
        prj.users.remove(self.cur_user)
        # detach the item from the model tree
        item.set_parent(None)
def before_content(self):
    """Called before parsing content.

    Pushes the class name onto the class-name stack; used to construct
    the full name for members.
    """
    ChapelObject.before_content(self)
    if self.names:
        clsname = self.names[0][0]
        self.env.temp_data['chpl:class'] = clsname
        self.clsname_set = True
def items(self, raw=False):
    """Like ``items`` for dicts but with a ``raw`` option.

    # Parameters

    _raw_ : `optional [bool]`

    > Default `False`; if `True` the view contains the raw values

    # Returns

    `ItemsView`

    > The key-value pairs of the record
    """
    if raw:
        return self._fieldDict.items()
    return collections.abc.Mapping.items(self)
def append(self, item):
    """Add a new item to the end of the collection.

    When the collection was empty, the freshly added item becomes the
    current one (``index`` is set to 0).
    """
    was_empty = len(self) == 0
    if was_empty:
        # Special case: make this the current item
        self.index = 0
    self.items.append(item)
def _from_dict(cls, _dict):
    """Initialize a Features object from a json dictionary."""
    # Map each JSON key to the options class that deserializes it.
    option_types = [
        ('concepts', ConceptsOptions),
        ('emotion', EmotionOptions),
        ('entities', EntitiesOptions),
        ('keywords', KeywordsOptions),
        ('metadata', MetadataOptions),
        ('relations', RelationsOptions),
        ('semantic_roles', SemanticRolesOptions),
        ('sentiment', SentimentOptions),
        ('categories', CategoriesOptions),
        ('syntax', SyntaxOptions),
    ]
    args = {
        key: option_cls._from_dict(_dict.get(key))
        for key, option_cls in option_types
        if key in _dict
    }
    return cls(**args)
def _set_counter(self, v, load=False):
    """Setter method for counter, mapped from YANG variable
    /rbridge_id/ag/counter (container).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_counter is considered as a private method. Backends looking
    to populate this variable should do so via calling
    thisObj._set_counter() directly.
    """
    # unwrap values that carry their own YANG type converter
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # wrap the value in the generated container type for this node
        t = YANGDynClass(v, base=counter.counter, is_container='container', presence=False, yang_name="counter", rest_name="counter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set Reliability counter value'}}, namespace='urn:brocade.com:mgmt:brocade-ag', defining_module='brocade-ag', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """counter must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=counter.counter, is_container='container', presence=False, yang_name="counter", rest_name="counter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set Reliability counter value'}}, namespace='urn:brocade.com:mgmt:brocade-ag', defining_module='brocade-ag', yang_type='container', is_config=True)""", })
    self.__counter = t
    # notify the object that one of its members changed, if supported
    if hasattr(self, '_set'):
        self._set()
def p_type(self, p):
    '''type : term
            | array_type opt_order
            | pointer_type
            | type LIST
            | type SET
            | type LPAREN opt_types RPAREN
            | type COLUMN type DICT
            | LPAREN types RPAREN
            | LARRAY type RARRAY
            | type OR type'''
    # Each p[0] is a tuple of candidate types; the branches are
    # distinguished by production length and the literal tokens matched.
    if len(p) == 2:
        # bare term: wrap in a 1-tuple of candidates
        p[0] = p[1],
    elif len(p) == 3 and p[2] == 'list':
        p[0] = tuple(List[t] for t in p[1])
    elif len(p) == 3 and p[2] == 'set':
        p[0] = tuple(Set[t] for t in p[1])
    elif len(p) == 3:
        # array_type opt_order
        if p[2] is None:
            # no explicit order: also accept the strided-view variant
            # of every 2D NDArray candidate
            expanded = []
            for nd in p[1]:
                expanded.append(nd)
                if isinstance(nd, NDArray) and len(nd.__args__) == 3:
                    expanded.append(NDArray[nd.__args__[0], -1::, -1::])
            p[0] = tuple(expanded)
        elif p[2] == "F":
            # Fortran order only makes sense for 2D arrays
            for nd in p[1]:
                if len(nd.__args__) != 3:
                    raise PythranSyntaxError("Invalid Pythran spec. " "F order is only valid for 2D arrays")
            p[0] = tuple(NDArray[nd.__args__[0], -1::, -1::] for nd in p[1])
        else:
            p[0] = p[1]
    elif len(p) == 5 and p[4] == ')':
        # function type: type ( opt_types )
        p[0] = tuple(Fun[args, r] for r in p[1] for args in (product(*p[3]) if len(p[3]) > 1 else p[3]))
    elif len(p) == 5:
        # dict type: type : type dict
        p[0] = tuple(Dict[k, v] for k in p[1] for v in p[3])
    elif len(p) == 4 and p[2] == 'or':
        # union of candidates: concatenate the tuples
        p[0] = p[1] + p[3]
    elif len(p) == 4 and p[3] == ')':
        # parenthesized types -> tuple type
        p[0] = tuple(Tuple[t] for t in p[2])
    elif len(p) == 4 and p[3] == ']':
        p[0] = p[2]
    else:
        raise PythranSyntaxError("Invalid Pythran spec. " "Unknown text '{0}'".format(p.value))
def get_section(self, *key):
    """Return the section registered under ``*key``.

    This is the recommended lookup hook when extending configmanager's
    behaviour.

    :raises RuntimeError: when ``key`` resolves to a plain item rather
        than a section.
    """
    resolved = self._get_item_or_section(key)
    if resolved.is_section:
        return resolved
    raise RuntimeError('{} is an item, not a section'.format(key))
def autocomplete(self, name, context_name=None, include_dubious=False):
    """Look up taxon-name matches for *name*, optionally within *context_name*.

    Each match returned by the service is a dict with:
        'higher'  boolean (DEF???)
        'exact'   boolean, True for an exact match
        'ottId'   int
        'name'    name (or uniqname???) for the taxon in OTT
        'nodeId'  int ID of node in the taxomachine db; probably not of
                  use to anyone...
    """
    if context_name and context_name not in self.valid_contexts:
        raise ValueError('"{}" is not a valid context name'.format(context_name))
    # v1 and the current API expose different endpoints and key names.
    if self.use_v1:
        uri = '{p}/autocompleteBoxQuery'.format(p=self.prefix)
        payload = {'queryString': name}
        context_key = 'contextName'
    else:
        uri = '{p}/autocomplete_name'.format(p=self.prefix)
        payload = {'name': name}
        context_key = 'context_name'
    if context_name:
        payload[context_key] = context_name
    if include_dubious:
        payload['include_dubious'] = True
    return self.json_http_post(uri, data=anyjson.dumps(payload))
def gauss_fit ( X , Y ) :
"""Fit the function to a gaussian .
Parameters
X : 1d array
X values
Y : 1d array
Y values
Returns
( The return from scipy . optimize . curve _ fit )
popt : array
Optimal values for the parameters
pcov : 2d array
The estimated covariance of popt .
Notes
/ ! \ This uses a slow curve _ fit function ! do not use if need speed !""" | X = np . asarray ( X )
Y = np . asarray ( Y )
# Can not have negative values
Y [ Y < 0 ] = 0
# define gauss function
def gauss ( x , a , x0 , sigma ) :
return a * np . exp ( - ( x - x0 ) ** 2 / ( 2 * sigma ** 2 ) )
# get first estimation for parameter
mean = ( X * Y ) . sum ( ) / Y . sum ( )
sigma = np . sqrt ( ( Y * ( ( X - mean ) ** 2 ) ) . sum ( ) / Y . sum ( ) )
height = Y . max ( )
# fit with curve _ fit
return curve_fit ( gauss , X , Y , p0 = [ height , mean , sigma ] ) |
def calculate_total_amt ( self , items = { } ) :
"""Returns the total amount / cost of items in the current invoice""" | _items = items . items ( ) or self . items . items ( )
return sum ( float ( x [ 1 ] . total_price ) for x in _items ) |
def is_iterable ( value , include_maps = False , include_sets = True ) :
"""Returns whether value is iterable .
` ` include _ maps ` `
Maps are technically iterable , defaulting to their keys , but you
commonly want to find if it is a list - like type and leave maps alone ,
so this is False by default .
` ` include _ sets ` `
Sets are also technically iterable and can be treated like lists , but
sometimes you don ' t want to include sets . Defaults to True .
Strings are not considered iterable , even though you can iterate over them
if you really want to , because the ` ` _ _ iter _ _ ` ` method is not defined for
them by Python . This is usually a good thing - - you probably don ' t want
to iterate over the characters of a string by accident .""" | if hasattr ( value , '__iter__' ) :
if not include_maps and hasattr ( value , 'keys' ) :
return False
if not include_sets and hasattr ( value , 'isdisjoint' ) :
return False
return True
else :
return False |
def filter(self, obj, *args, **kwargs):
    """Run *obj* through every function in the filter chain, in order.

    :param obj: The object to filter.
    :param args: Extra positional arguments forwarded to each filter.
    :param kwargs: Extra keyword arguments forwarded to each filter.
    :return: The value produced by the last filter, or :data:`None` as
        soon as any filter returns :data:`None` (short-circuit).

    See the documentation of :class:`Filter` on how filtering operates.
    """
    result = obj
    for entry in self._filter_order:
        # entry is (priority, tiebreak, callable); only the callable matters.
        result = entry[2](result, *args, **kwargs)
        if result is None:
            return None
    return result
def _check_time_range ( time_range , now ) :
'''Check time range''' | if _TIME_SUPPORTED :
_start = dateutil_parser . parse ( time_range [ 'start' ] )
_end = dateutil_parser . parse ( time_range [ 'end' ] )
return bool ( _start <= now <= _end )
else :
log . error ( 'Dateutil is required.' )
return False |
def ensure_scheme ( url , default_scheme = 'http' ) :
"""Adds a scheme to a url if not present .
Args :
url ( string ) : a url , assumed to start with netloc
default _ scheme ( string ) : a scheme to be added
Returns :
string : URL with a scheme""" | parsed = urlsplit ( url , scheme = default_scheme )
if not parsed . netloc :
parsed = SplitResult ( scheme = parsed . scheme , netloc = parsed . path , path = '' , query = parsed . query , fragment = parsed . fragment )
return urlunsplit ( parsed ) |
def get(method, hmc, uri, uri_parms, logon_required):
    """Operation: List LDAP Server Definitions."""
    query_str = uri_parms[0]
    try:
        console = hmc.consoles.lookup_by_oid(None)
    except KeyError:
        # No console resource -> the URI cannot be served.
        raise InvalidResourceError(method, uri)
    filter_args = parse_query_parms(method, uri, query_str)
    results = []
    for defn in console.ldap_server_definitions.list(filter_args):
        # List operations expose only this summary subset of properties.
        summary = {prop: defn.properties[prop]
                   for prop in defn.properties
                   if prop in ('element-uri', 'name', 'type')}
        results.append(summary)
    return {'ldap-server-definitions': results}
def cache_invalidate_by_tags ( tags , cache = None ) :
"""Clear cache by tags .""" | if isinstance ( tags , basestring ) :
tags = [ tags ]
tag_keys = [ CACHE_TAG_KEY % tag for tag in tags if tag ]
if not tag_keys :
raise ValueError ( 'Attr tags invalid' )
if cache is None :
cache = default_cache
tag_keys_for_delete = [ ]
if cache . __class__ . __name__ == 'RedisCache' :
from django_redis . exceptions import ConnectionInterrupted
try :
redis_client = cache . client . get_client ( )
for tag_key in tag_keys :
keys = redis_client . smembers ( tag_key )
if keys :
cache . delete_many ( keys )
tag_keys_for_delete . append ( tag_key )
except ConnectionInterrupted :
pass
# todo add logging
else :
for tag_key in tag_keys :
keys = cache . get ( tag_key )
if keys :
cache . delete_many ( keys )
tag_keys_for_delete . append ( tag_key )
if tag_keys_for_delete :
cache . delete_many ( tag_keys_for_delete ) |
async def serve(http_handler: HTTP_WRAPPER_TYPE, websocket_handler=None, address: str = '127.0.0.1', port: int = 8000):
    """Start an asyncio TCP server that dispatches connections via SocketWrapper.

    :param http_handler: callable handling HTTP requests.
    :param websocket_handler: optional websocket handler; presumably the
        server is HTTP-only when ``None`` — confirm against ``SocketWrapper``.
    :param address: interface to bind (default: localhost only).
    :param port: TCP port to listen on.
    :return: the server object created by :func:`asyncio.start_server`.
    """
    return await asyncio.start_server(SocketWrapper(http_handler, websocket_handler), address, port)
def interpolations_to_summary ( sample_ind , interpolations , first_frame , last_frame , hparams , decode_hp ) :
"""Converts interpolated frames into tf summaries .
The summaries consists of :
1 . Image summary corresponding to the first frame .
2 . Image summary corresponding to the last frame .
3 . The interpolated frames as a gif summary .
Args :
sample _ ind : int
interpolations : Numpy array , shape = ( num _ interp , H , W , 3)
first _ frame : Numpy array , shape = ( HWC )
last _ frame : Numpy array , shape = ( HWC )
hparams : HParams , train hparams
decode _ hp : HParams , decode hparams
Returns :
summaries : list of tf Summary Values .""" | parent_tag = "sample_%d" % sample_ind
frame_shape = hparams . problem . frame_shape
interp_shape = [ hparams . batch_size , decode_hp . num_interp ] + frame_shape
interpolations = np . reshape ( interpolations , interp_shape )
interp_tag = "%s/interp/%s" % ( parent_tag , decode_hp . channel_interp )
if decode_hp . channel_interp == "ranked" :
interp_tag = "%s/rank_%d" % ( interp_tag , decode_hp . rank_interp )
summaries , _ = common_video . py_gif_summary ( interp_tag , interpolations , return_summary_value = True , max_outputs = decode_hp . max_display_outputs , fps = decode_hp . frames_per_second )
if decode_hp . save_frames :
first_frame_summ = image_utils . image_to_tf_summary_value ( first_frame , "%s/first" % parent_tag )
last_frame_summ = image_utils . image_to_tf_summary_value ( last_frame , "%s/last" % parent_tag )
summaries . append ( first_frame_summ )
summaries . append ( last_frame_summ )
return summaries |
def get_handler_classes ( self ) :
"""Return the list of handlers to use when receiving RPC requests .""" | handler_classes = [ import_string ( handler_cls ) for handler_cls in settings . MODERNRPC_HANDLERS ]
if self . protocol == ALL :
return handler_classes
else :
return [ cls for cls in handler_classes if cls . protocol in ensure_sequence ( self . protocol ) ] |
def __search ( self ) :
"""Performs the search .""" | self . __search_results = [ ]
editorsFiles = self . __container . default_target in self . __location . targets and [ editor . file for editor in self . __container . script_editor . list_editors ( ) ] or [ ]
self . __search_editors_files ( editorsFiles )
self . __search_files ( self . __location . files )
for directory in self . __location . directories :
if self . __interrupt :
return
files_walker = foundations . walkers . files_walker ( directory , self . __location . filters_in , list ( itertools . chain ( self . __location . filters_out , self . __location . files , editorsFiles ) ) )
self . __search_files ( files_walker )
not self . __interrupt and self . searchFinished . emit ( self . __search_results ) |
def get_workflows ( self ) :
"""Scans and loads all wf found under WORKFLOW _ PACKAGES _ PATHS
Yields : XML content of diagram file""" | for pth in settings . WORKFLOW_PACKAGES_PATHS :
for f in glob . glob ( "%s/*.bpmn" % pth ) :
with open ( f ) as fp :
yield os . path . basename ( os . path . splitext ( f ) [ 0 ] ) , fp . read ( ) |
def export_aggregate_by_csv ( ekey , dstore ) :
""": param ekey : export key , i . e . a pair ( datastore key , fmt )
: param dstore : datastore object""" | token , what = ekey [ 0 ] . split ( '/' , 1 )
aw = extract ( dstore , 'aggregate/' + what )
fnames = [ ]
writer = writers . CsvWriter ( fmt = writers . FIVEDIGITS )
path = '%s.%s' % ( sanitize ( ekey [ 0 ] ) , ekey [ 1 ] )
fname = dstore . export_path ( path )
writer . save ( aw . to_table ( ) , fname )
fnames . append ( fname )
return fnames |
def generate_id_token ( self , name , audience , delegates = None , include_email = None , retry = google . api_core . gapic_v1 . method . DEFAULT , timeout = google . api_core . gapic_v1 . method . DEFAULT , metadata = None , ) :
"""Generates an OpenID Connect ID token for a service account .
Example :
> > > from google . cloud import iam _ credentials _ v1
> > > client = iam _ credentials _ v1 . IAMCredentialsClient ( )
> > > name = client . service _ account _ path ( ' [ PROJECT ] ' , ' [ SERVICE _ ACCOUNT ] ' )
> > > # TODO : Initialize ` audience ` :
> > > audience = ' '
> > > response = client . generate _ id _ token ( name , audience )
Args :
name ( str ) : The resource name of the service account for which the credentials are
requested , in the following format :
` ` projects / - / serviceAccounts / { ACCOUNT _ EMAIL _ OR _ UNIQUEID } ` ` .
audience ( str ) : The audience for the token , such as the API or account that this token
grants access to .
delegates ( list [ str ] ) : The sequence of service accounts in a delegation chain . Each service
account must be granted the ` ` roles / iam . serviceAccountTokenCreator ` `
role on its next service account in the chain . The last service account
in the chain must be granted the
` ` roles / iam . serviceAccountTokenCreator ` ` role on the service account
that is specified in the ` ` name ` ` field of the request .
The delegates must have the following format :
` ` projects / - / serviceAccounts / { ACCOUNT _ EMAIL _ OR _ UNIQUEID } ` `
include _ email ( bool ) : Include the service account email in the token . If set to ` ` true ` ` , the
token will contain ` ` email ` ` and ` ` email _ verified ` ` claims .
retry ( Optional [ google . api _ core . retry . Retry ] ) : A retry object used
to retry requests . If ` ` None ` ` is specified , requests will not
be retried .
timeout ( Optional [ float ] ) : The amount of time , in seconds , to wait
for the request to complete . Note that if ` ` retry ` ` is
specified , the timeout applies to each individual attempt .
metadata ( Optional [ Sequence [ Tuple [ str , str ] ] ] ) : Additional metadata
that is provided to the method .
Returns :
A : class : ` ~ google . cloud . iam _ credentials _ v1 . types . GenerateIdTokenResponse ` instance .
Raises :
google . api _ core . exceptions . GoogleAPICallError : If the request
failed for any reason .
google . api _ core . exceptions . RetryError : If the request failed due
to a retryable error and retry attempts failed .
ValueError : If the parameters are invalid .""" | # Wrap the transport method to add retry and timeout logic .
if "generate_id_token" not in self . _inner_api_calls :
self . _inner_api_calls [ "generate_id_token" ] = google . api_core . gapic_v1 . method . wrap_method ( self . transport . generate_id_token , default_retry = self . _method_configs [ "GenerateIdToken" ] . retry , default_timeout = self . _method_configs [ "GenerateIdToken" ] . timeout , client_info = self . _client_info , )
request = common_pb2 . GenerateIdTokenRequest ( name = name , audience = audience , delegates = delegates , include_email = include_email , )
if metadata is None :
metadata = [ ]
metadata = list ( metadata )
try :
routing_header = [ ( "name" , name ) ]
except AttributeError :
pass
else :
routing_metadata = google . api_core . gapic_v1 . routing_header . to_grpc_metadata ( routing_header )
metadata . append ( routing_metadata )
return self . _inner_api_calls [ "generate_id_token" ] ( request , retry = retry , timeout = timeout , metadata = metadata ) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.