signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def clone(self):
    """Return a new Scope object with ``curr_scope`` pinned at the current one.

    :returns: A new scope object
    """
    self._dlog("cloning the stack")
    # TODO: is a brand-new Scope really necessary here? Perhaps ref counters
    # and a global scope object giving a view into (or a snapshot of) a
    # specific scope stack would be enough.
    duplicate = Scope(self._log)
    duplicate._scope_stack = self._scope_stack
    duplicate._curr_scope = self._curr_scope
    return duplicate
|
def get_based_on_grades_metadata(self):
    """Return the metadata for a grade-based designation.

    :returns: (osid.Metadata) metadata for the grade-based designation
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceForm.get_group_metadata_template
    mdata = dict(self._mdata['based_on_grades'])
    mdata['existing_boolean_values'] = self._my_map['basedOnGrades']
    return Metadata(**mdata)
|
def handle_http_error(self, code, error):
    """Render the ``error{code}.html`` template for an HTTP error.

    Convenient way to use it::

        from functools import partial
        handler = partial(app.handle_http_error, code)
        app.errorhandler(code)(handler)
    """
    server_side = (code // 100) == 5
    if server_side:
        # Roll back any half-finished transaction before rendering;
        # otherwise the error page itself may fail and yield a raw 500. :-(
        db.session.rollback()
    return render_template(f"error{code:d}.html", error=error), code
|
def set_role_id(self, role_name, role_id, mount_point='approle'):
    """POST /auth/<mount_point>/role/<role name>/role-id

    :param role_name: name of the AppRole role
    :param role_id: role id value to assign
    :param mount_point: auth backend mount point (default ``approle``)
    :return: response from the adapter's POST call
    """
    endpoint = '/v1/auth/{0}/role/{1}/role-id'.format(mount_point, role_name)
    payload = {'role_id': role_id}
    return self._adapter.post(endpoint, json=payload)
|
def asarray(self, out=None, squeeze=True, lock=None, reopen=True, maxsize=None, maxworkers=None, validate=True):
    """Read image data from file and return as numpy array.

    Raise ValueError if format is unsupported.

    Parameters
    ----------
    out : numpy.ndarray, str, or file-like object
        Buffer where image data will be saved.
        If None (default), a new array will be created.
        If numpy.ndarray, a writable array of compatible dtype and shape.
        If 'memmap', directly memory-map the image data in the TIFF file
        if possible; else create a memory-mapped array in a temporary file.
        If str or open file, the file name or file object used to create a
        memory-map to an array stored in a binary file on disk.
    squeeze : bool
        If True (default), all length-1 dimensions (except X and Y) are
        squeezed out from the array.
        If False, the shape of the returned array might differ from page.shape.
    lock : {RLock, NullContext}
        A reentrant lock used to synchronize reads from file.
        If None (default), the lock of the parent's filehandle is used.
    reopen : bool
        If True (default) and the parent file handle is closed, the file is
        temporarily re-opened and closed if no exception occurs.
    maxsize : int
        Maximum size of data before a ValueError is raised.
        Can be used to catch DOS. Default: 16 TB.
    maxworkers : int or None
        Maximum number of threads to concurrently decode tile data.
        If None (default), up to half the CPU cores are used for
        compressed tiles. See remarks in TiffFile.asarray.
    validate : bool
        If True (default), validate various parameters.
        If None, only validate parameters and return None.

    Returns
    -------
    numpy.ndarray
        Numpy array of decompressed, depredicted, and unpacked image data
        read from Strip/Tile Offsets/ByteCounts, formatted according to
        shape and dtype metadata found in tags and parameters.
        Photometric conversion, pre-multiplied alpha, orientation, and
        colorimetry corrections are not applied. Specifically, CMYK images
        are not converted to RGB, MinIsWhite images are not inverted, and
        color palettes are not applied.
    """
    # Properties from TiffPage or TiffFrame. Keep a reference to the original
    # instance (self_) because tags below are fetched from it, then rebind
    # `self` to the keyframe that carries the shared shape/dtype metadata.
    fh = self.parent.filehandle
    byteorder = self.parent.tiff.byteorder
    offsets, bytecounts = self._offsetscounts
    self_ = self
    self = self.keyframe  # self or keyframe
    if not self._shape or product(self._shape) == 0:
        # Nothing to read for an empty page.
        return None
    tags = self.tags

    if validate or validate is None:
        if maxsize is None:
            maxsize = 2 ** 44  # 16 TB safety cap against DOS-sized pages
        if maxsize and product(self._shape) > maxsize:
            raise ValueError('data are too large %s' % str(self._shape))
        if self.dtype is None:
            raise ValueError('data type not supported: %s%i' % (self.sampleformat, self.bitspersample))
        if self.compression not in TIFF.DECOMPESSORS:
            raise ValueError('cannot decompress %s' % self.compression.name)
        if 'SampleFormat' in tags:
            tag = tags['SampleFormat']
            # All samples must share one format; any nonzero difference fails.
            if tag.count != 1 and any((i - tag.value[0] for i in tag.value)):
                raise ValueError('sample formats do not match %s' % tag.value)
        if self.is_subsampled and (self.compression not in (6, 7) or self.planarconfig == 2):
            raise NotImplementedError('chroma subsampling not supported')
        if validate is None:
            # validate-only mode: report success by returning None.
            return None

    lock = fh.lock if lock is None else lock
    with lock:
        closed = fh.closed
        if closed:
            if reopen:
                fh.open()
            else:
                raise IOError('file handle is closed')

    dtype = self._dtype
    shape = self._shape
    imagewidth = self.imagewidth
    imagelength = self.imagelength
    imagedepth = self.imagedepth
    bitspersample = self.bitspersample
    typecode = byteorder + dtype.char
    lsb2msb = self.fillorder == 2  # FillOrder 2: bits are reversed per byte
    istiled = self.is_tiled
    if istiled:
        tilewidth = self.tilewidth
        tilelength = self.tilelength
        tiledepth = self.tiledepth
        # Number of tiles per axis, rounding up to cover partial tiles.
        tw = (imagewidth + tilewidth - 1) // tilewidth
        tl = (imagelength + tilelength - 1) // tilelength
        td = (imagedepth + tiledepth - 1) // tiledepth
        tiledshape = (td, tl, tw)
        tileshape = (tiledepth, tilelength, tilewidth, shape[-1])
        runlen = tilewidth
    else:
        runlen = imagewidth
    if self.planarconfig == 1:
        # Contiguous (chunky) planar config: samples interleave per pixel.
        runlen *= self.samplesperpixel

    if isinstance(out, str) and out == 'memmap' and self.is_memmappable:
        # Direct memory map of the array in the TIFF file.
        with lock:
            result = fh.memmap_array(typecode, shape, offset=offsets[0])
    elif self.is_contiguous:
        # Uncompressed, contiguous data: one bulk read into the output array.
        if out is not None:
            out = create_output(out, shape, dtype)
        with lock:
            fh.seek(offsets[0])
            result = fh.read_array(typecode, product(shape), out=out)
        if lsb2msb:
            bitorder_decode(result, out=result)
    else:
        # Decompress, unpack, ... individual strips or tiles.
        result = create_output(out, shape, dtype)
        decompress = TIFF.DECOMPESSORS[self.compression]
        if self.compression in (6, 7):  # COMPRESSION.JPEG
            colorspace = None
            outcolorspace = None
            jpegtables = None
            if lsb2msb:
                log.warning('TiffPage.asarray: disabling LSB2MSB for JPEG')
                lsb2msb = False
            if 'JPEGTables' in tags:
                # Load JPEGTables from TiffFrame (tag 347 on the original
                # instance, not the keyframe).
                jpegtables = self_._gettags({347}, lock=lock)[0][1].value
            # TODO: obtain table from OJPEG tags
            # elif ('JPEGInterchangeFormat' in tags and
            #       'JPEGInterchangeFormatLength' in tags and
            #       tags['JPEGInterchangeFormat'].value != offsets[0]):
            #     fh.seek(tags['JPEGInterchangeFormat'].value)
            #     fh.read(tags['JPEGInterchangeFormatLength'].value)
            if 'ExtraSamples' in tags:
                # Keep the stored colorspace when extra samples are present.
                pass
            elif self.photometric == 6:
                # YCBCR -> RGB
                outcolorspace = 'RGB'
            elif self.photometric == 2:
                if self.planarconfig == 2:
                    # TODO: decode JPEG to planar RGB
                    raise NotImplementedError('cannot decode JPEG to planar RGB')
                colorspace = outcolorspace = 'RGB'
            else:
                outcolorspace = TIFF.PHOTOMETRIC(self.photometric).name
            if istiled:
                heightwidth = tilelength, tilewidth
            else:
                heightwidth = imagelength, imagewidth

            # Bind current values as defaults so the closures below do not
            # pick up later rebindings of these names.
            def decompress(data, bitspersample=bitspersample, jpegtables=jpegtables, colorspace=colorspace, outcolorspace=outcolorspace, shape=heightwidth, out=None, _decompress=decompress):
                return _decompress(data, bitspersample, jpegtables, colorspace, outcolorspace, shape, out)

            def unpack(data):
                return data.reshape(-1)

        elif bitspersample in (8, 16, 32, 64, 128):
            if (bitspersample * runlen) % 8:
                raise ValueError('data and sample size mismatch')
            if self.predictor == 3:  # PREDICTOR.FLOATINGPOINT
                # The floating-point horizontal differencing decoder needs
                # the raw byte order (no byteorder prefix).
                typecode = dtype.char

            def unpack(data, typecode=typecode, out=None):
                try:
                    # Read-only numpy array view over the buffer.
                    return numpy.frombuffer(data, typecode)
                except ValueError:
                    # Strips may be missing EOI; truncate to whole samples.
                    # log.warning('TiffPage.asarray: ...')
                    bps = bitspersample // 8
                    xlen = (len(data) // bps) * bps
                    return numpy.frombuffer(data[:xlen], typecode)

        elif isinstance(bitspersample, tuple):
            # Packed RGB with per-channel bit widths (e.g. 5,6,5).
            def unpack(data, out=None):
                return unpack_rgb(data, typecode, bitspersample)

        else:
            # Arbitrary bit depth: unpack bit-packed integers.
            def unpack(data, out=None):
                return packints_decode(data, typecode, bitspersample, runlen)

        # TODO: store decode function for future use
        # TODO: unify tile and strip decoding
        if istiled:
            unpredict = TIFF.UNPREDICTORS[self.predictor]

            def decode(tile, tileindex):
                return tile_decode(tile, tileindex, tileshape, tiledshape, lsb2msb, decompress, unpack, unpredict, result[0])

            tileiter = buffered_read(fh, lock, offsets, bytecounts)
            if maxworkers is None:
                # Threads only pay off for compressed tiles.
                maxworkers = 0 if self.compression > 1 else 1
            if maxworkers == 0:
                import multiprocessing  # noqa: delay import
                maxworkers = multiprocessing.cpu_count() // 2
            if maxworkers < 2:
                for i, tile in enumerate(tileiter):
                    decode(tile, i)
            else:
                # Decode first tile un-threaded to catch exceptions early.
                decode(next(tileiter), 0)
                with ThreadPoolExecutor(maxworkers) as executor:
                    executor.map(decode, tileiter, range(1, len(offsets)))
        else:
            stripsize = self.rowsperstrip * self.imagewidth
            if self.planarconfig == 1:
                stripsize *= self.samplesperpixel
            outsize = stripsize * self.dtype.itemsize
            result = result.reshape(-1)
            index = 0
            for strip in buffered_read(fh, lock, offsets, bytecounts):
                if lsb2msb:
                    strip = bitorder_decode(strip, out=strip)
                strip = decompress(strip, out=outsize)
                strip = unpack(strip)
                # Clamp copy size: the final strip may be short or overrun.
                size = min(result.size, strip.size, stripsize, result.size - index)
                result[index:index + size] = strip[:size]
                del strip
                index += size
        result.shape = self._shape

    if self.predictor != 1 and not (istiled and not self.is_contiguous):
        # Tiles were already unpredicted inside tile_decode above.
        unpredict = TIFF.UNPREDICTORS[self.predictor]
        result = unpredict(result, axis=-2, out=result)
    if squeeze:
        try:
            result.shape = self.shape
        except ValueError:
            log.warning('TiffPage.asarray: failed to reshape %s to %s', result.shape, self.shape)
    if closed:
        # TODO: file should remain open if an exception occurred above
        fh.close()
    return result
|
def cookie_encode(data, key):
    '''Encode and sign a pickle-able object. Return a (byte) string.'''
    payload = pickle.dumps(data, -1)
    msg = base64.b64encode(payload)
    digest = hmac.new(tob(key), msg).digest()
    sig = base64.b64encode(digest)
    # Format: '!' + signature + '?' + message
    return tob('!') + sig + tob('?') + msg
|
def request(self, method, params=None):
    """Send a JSON RPC request to the client.

    Args:
        method (str): The method name of the message to send
        params (any): The payload of the message

    Returns:
        Future that will resolve once a response has been received
    """
    msg_id = self._id_generator()
    log.debug('Sending request with id %s: %s %s', msg_id, method, params)
    message = {
        'jsonrpc': JSONRPC_VERSION,
        'id': msg_id,
        'method': method,
    }
    if params is not None:
        message['params'] = params
    future = futures.Future()
    future.add_done_callback(self._cancel_callback(msg_id))
    self._server_request_futures[msg_id] = future
    self._consumer(message)
    return future
|
def chats_search(self, q=None, **kwargs):
    """Search chats.

    See https://developer.zendesk.com/rest_api/docs/chat/chats#search-chats
    """
    api_path = "/api/v2/chats/search"
    # Start from any caller-supplied query dict, then layer `q` on top.
    api_query = dict(kwargs.pop("query", {}))
    if q:
        api_query["q"] = q
    return self.call(api_path, query=api_query, **kwargs)
|
def dump(df, fp):
    """Dump a DataFrame to an ARFF file.

    :param DataFrame df: frame to serialize
    :param file fp: writable file object
    """
    liacarff.dump(__dump(df), fp)
|
def exceptionCaught(self, exc=None, **kwargs):
    'Maintain list of most recent errors and return most recent one.'
    if isinstance(exc, ExpectedException):
        # Already reported; don't log it again.
        return
    self.lastErrors.append(stacktrace())
    if kwargs.get('status', True):
        # Display the last line of the latest traceback.
        status(self.lastErrors[-1][-1], priority=2)
    if options.debug:
        raise  # re-raise the active exception in debug mode
|
def maps_get_default_rules_output_rules_monitor(self, **kwargs):
    """Auto Generated Code"""
    # NOTE(review): the generated code creates a "config" element and then
    # immediately discards it, passing the "maps_get_default_rules" element
    # to the callback instead. Behavior preserved as-is.
    ET.Element("config")
    config = ET.Element("maps_get_default_rules")
    output = ET.SubElement(config, "output")
    rules = ET.SubElement(output, "rules")
    monitor = ET.SubElement(rules, "monitor")
    monitor.text = kwargs.pop('monitor')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def _aspirate_plunger_position(self, ul):
    """Return the plunger axis position for aspirating ``ul`` microliters.

    Translates the liquid volume into absolute coordinates on this
    pipette's axis using the calibrated ul-to-mm conversion.
    """
    distance_mm = ul / self._ul_per_mm(ul, 'aspirate')
    target = self._get_plunger_position('bottom') + distance_mm
    return round(target, 6)
|
def _is_valid_token(self, auth_token):
    '''Check if this is a valid salt-api token or valid Salt token.

    salt-api tokens are regular session tokens that tie back to a real Salt
    token. Salt tokens are tokens generated by Salt's eauth system.

    :return bool: True if valid, False if not valid.
    '''
    # Tokens are hex strings; None or anything non-hex raises here.
    try:
        int(auth_token, 16)
    except (TypeError, ValueError):
        return False
    # If the token is in our session table it's a salt-api token and the
    # real Salt token lives inside the stored session.
    orig_session, _ = cherrypy.session.cache.get(auth_token, ({}, None))
    # Not in the session table: assume it's a regular Salt token.
    salt_token = orig_session.get('token', auth_token)
    # The eauth system does not currently support perms for the event
    # stream, so only existence of the token is checked, not its perms.
    if not salt_token:
        return False
    return True if self.resolver.get_token(salt_token) else False
|
def difference(*sets, **kwargs):
    """Subtract all tail sets from the head set.

    Parameters
    ----------
    sets : tuple of indexable objects
        First set is the head, from which we subtract; the remaining items
        form the tail, which are subtracted from the head.

    Returns
    -------
    Items which are in the head but not in any of the tail sets.

    Notes
    -----
    Alternative implementation: compute union of tail, then union with head,
    then use set_count(1).
    """
    head = sets[0]
    tail = sets[1:]
    idx = as_index(head, **kwargs)
    lhs = idx.unique
    rhs = [intersection(idx, t, **kwargs) for t in tail]
    return exclusive(lhs, *rhs, axis=0, assume_unique=True)
|
def sunset_utc(self, date, latitude, longitude, observer_elevation=0):
    """Calculate sunset time in the UTC timezone.

    :param date: Date to calculate for.
    :type date: :class:`datetime.date`
    :param latitude: Latitude - Northern latitudes should be positive
    :type latitude: float
    :param longitude: Longitude - Eastern longitudes should be positive
    :type longitude: float
    :param observer_elevation: Elevation in metres to calculate sunset for
    :type observer_elevation: int
    :return: The UTC date and time at which sunset occurs.
    :rtype: :class:`~datetime.datetime`
    """
    try:
        # 90 degrees plus standard atmospheric refraction offset (0.833).
        return self._calc_time(90 + 0.833, SUN_SETTING, date, latitude, longitude, observer_elevation)
    except ValueError as exc:
        if exc.args[0] != "math domain error":
            raise
        raise AstralError(("Sun never reaches the horizon on this day, " "at this location."))
|
async def _put_chunk(cls, session: aiohttp.ClientSession, upload_uri: str, buf: bytes):
    """Upload one chunk of ``buf`` to ``upload_uri``."""
    # Build the correct headers for a raw binary chunk.
    headers = {
        'Content-Type': 'application/octet-stream',
        'Content-Length': '%s' % len(buf),
    }
    credentials = cls._handler.session.credentials
    if credentials is not None:
        utils.sign(upload_uri, headers, credentials)
    # Perform upload of chunk; anything other than 200 is an error.
    response = await session.put(upload_uri, data=buf, headers=headers)
    async with response:
        if response.status != 200:
            content = await response.read()
            request = {"body": buf, "headers": headers, "method": "PUT", "uri": upload_uri}
            raise CallError(request, response, content, None)
|
def exception(function):
    """Decorator that runs ``function`` and prints any exception it raises.

    Returns -1 when an exception occurred, otherwise the function's result.
    """
    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        try:
            return function(*args, **kwargs)
        except Exception as e:
            # Print the failure details and signal the error via -1.
            err = "There was an exception in %s():" % (function.__name__)
            print(("%s \n %s \n%s" % (err, e, sys.exc_info())))
            return -1

    return wrapper
|
def add_post_configure_callback(self, callback, run_once=False):
    """Register a callback to run after every call to :meth:`configure`.

    Functions run at the end of :meth:`configure` receive the application's
    resulting configuration and the arguments passed to :meth:`configure`,
    in that order. The first argument is the application's FROZEN
    configuration after :meth:`configure` finished — an
    :class:`~werkzeug.datastructures.ImmutableDict` — because the purpose of
    a post-configure callback is lazy initialization that requires the
    configuration, not further alteration of it.

    Return values of registered callbacks are entirely ignored. Callbacks
    are run in registration order, but you should never depend on another
    callback.

    Args:
        callback (function):
            The function to run after :meth:`configure`. Receives the
            application's current configuration as the first argument, and
            the same arguments passed to :meth:`configure` as the second.

    Keyword Args:
        run_once (bool):
            Pass ``True`` to run the callback only once and then
            deregister it; otherwise it runs on every configure.

    Returns:
        fleaker.base.BaseApplication:
            Returns itself for a fluent interface.
    """
    bucket = 'single' if run_once else 'multiple'
    self._post_configure_callbacks[bucket].append(callback)
    return self
|
def login(self):
    """Log the user in.

    The log in information is saved in the client:
    - userid
    - username
    - cookies

    :return: The raw response from the request
    """
    if self.options['token']:
        # Token auth: no login request needed, just fetch our own user.
        self.client.token = self.options['token']
        result = self.users.get_user('me')
    else:
        response = self.users.login_user({
            'login_id': self.options['login_id'],
            'password': self.options['password'],
            'token': self.options['mfa_token'],
        })
        if response.status_code == 200:
            self.client.token = response.headers['Token']
            self.client.cookies = response.cookies
        try:
            result = response.json()
        except ValueError:
            log.debug('Could not convert response to json, returning raw response')
            result = response
    log.debug(result)
    if 'id' in result:
        self.client.userid = result['id']
    if 'username' in result:
        self.client.username = result['username']
    return result
|
def output_callback(self, line, kill_switch):
    """Update openvpn status flags based on one line of process output."""
    self.notifications += line + "\n"
    if "Initialization Sequence Completed" in line:
        self.started = True
    if any(marker in line for marker in ("ERROR:", "Cannot resolve host address:")):
        self.error = True
    if "process exiting" in line:
        self.stopped = True
|
def _label_to_tag(self, name, labels, scraper_config, tag_name=None):
    """Look up ``name`` in ``labels`` and return the corresponding tag string.

    The tag name defaults to the label name when not specified.
    Returns None when the label is absent (or its value is falsy).
    """
    value = labels.get(name)
    if not value:
        return None
    return self._format_tag(tag_name or name, value, scraper_config)
|
def dedupe(items):
    """Yield the items of a sequence in order, skipping duplicates.

    NOTE: This only works if items in the sequence are hashable types.
    Taken from the Python Cookbook, 3rd ed. Such a great book!
    """
    seen = set()
    for candidate in items:
        if candidate in seen:
            continue
        seen.add(candidate)
        yield candidate
|
def read_hector_constraint(constraint_file):
    """Read a Hector constraint CSV file and return it as a Pandas Series."""
    frame = pd.read_csv(constraint_file, index_col=0, comment=";")
    # Mask out any non-numeric cells, then coerce the index to integers.
    numeric_mask = frame.applymap(lambda cell: isinstance(cell, (int, float)))
    frame = frame[numeric_mask]
    frame.index = frame.index.astype(int)
    return frame.iloc[:, 0]
|
def get_cancer_studies(study_filter=None):
    """Return a list of cancer study identifiers, optionally filtered.

    There are typically multiple studies for a given type of cancer and a
    filter can be used to constrain the returned list.

    Parameters
    ----------
    study_filter : Optional[str]
        A string used to filter the study IDs to return. Example: "paad"

    Returns
    -------
    study_ids : list[str]
        A list of study IDs. For instance "paad" as a filter would result
        in a list of study IDs with paad in their name like "paad_icgc",
        "paad_tcga", etc.
    """
    df = send_request(cmd='getCancerStudies')
    filtered = _filter_data_frame(df, ['cancer_study_id'], 'cancer_study_id', study_filter)
    return list(filtered['cancer_study_id'].values())
|
def rgb_to_xyz(r, g=None, b=None):
    """Convert the color from sRGB to CIE XYZ.

    The method assumes that the RGB coordinates are given in the sRGB
    colorspace (D65).

    .. note::
        Compensation for the sRGB gamma correction is applied before
        converting.

    Parameters:
        r: The Red component value [0...1], or a (r, g, b) sequence
        g: The Green component value [0...1]
        b: The Blue component value [0...1]

    Returns:
        The color as an (x, y, z) tuple, each component in [0...1].

    >>> '(%g, %g, %g)' % rgb_to_xyz(1, 0.5, 0)
    '(0.488941, 0.365682, 0.0448137)'
    """
    # Accept a single sequence argument as well as three scalars.
    if isinstance(r, (list, tuple)):
        r, g, b = r

    def _linearize(v):
        # Undo the sRGB gamma correction (sRGB piecewise transfer function).
        return v / 12.92 if v <= 0.03928 else ((v + 0.055) / 1.055) ** 2.4

    r, g, b = (_linearize(v) for v in (r, g, b))
    # Linear sRGB (D65) -> XYZ matrix transform.
    x = (r * 0.4124) + (g * 0.3576) + (b * 0.1805)
    y = (r * 0.2126) + (g * 0.7152) + (b * 0.0722)
    z = (r * 0.0193) + (g * 0.1192) + (b * 0.9505)
    return (x, y, z)
|
def designator(self):
    """Return the version and error correction level as string ``V-E``.

    ``V`` is the version number and ``E`` the error level; the error part
    is omitted when no error level is set.
    """
    version = str(self.version)
    if self.error:
        return version + '-' + self.error
    return version
|
def upcoming_releases(self, product):
    """Get upcoming releases for this product.

    Specifically we search for releases with a GA date greater-than or
    equal to today's date.

    :param product: str, eg. "ceph"
    :returns: deferred that when fired returns a list of Munch (dict-like)
              objects representing all releases, sorted by shortname.
    """
    today = date.today().strftime('%Y-%m-%d')
    url = ('api/v6/releases/'
           + '?product__shortname=' + product
           + '&ga_date__gte=' + today
           + '&ordering=shortname_sort')
    releases = yield self._get(url)
    defer.returnValue(munchify(releases))
|
def create_textview(self, wrap_mode=Gtk.WrapMode.WORD_CHAR, justify=Gtk.Justification.LEFT, visible=True, editable=True):
    """Create a Gtk.TextView configured with wrap mode and justification."""
    view = Gtk.TextView()
    view.set_wrap_mode(wrap_mode)
    view.set_editable(editable)
    # Non-editable views always hide the cursor; editable ones honor
    # the `visible` flag.
    view.set_cursor_visible(visible if editable else False)
    view.set_justification(justify)
    return view
|
def get_q_version(q_home):
    """Return the version of q installed at ``q_home``.

    Parses ``q.k`` for the ``k:`` version line; falls back to '2.2' when
    no such line is present.
    """
    q_k_path = os.path.join(q_home, 'q.k')
    with open(q_k_path) as source:
        for line in source:
            if line.startswith('k:'):
                return line[2:5]
    return '2.2'
|
def contribute_error_pages(self):
    """Contribute generic static error message pages to an existing section."""
    static_dir = self.settings.STATIC_ROOT
    if not static_dir:
        # Source static directory is not configured: fall back to a
        # per-project temporary directory and remember it in settings.
        import tempfile
        static_dir = os.path.join(tempfile.gettempdir(), self.project_name)
        self.settings.STATIC_ROOT = static_dir
    prefix = os.path.join(static_dir, 'uwsgify')
    self.section.routing.set_error_pages(common_prefix=prefix)
|
def element(self):
    """Return the element that owns the first line of this operation.

    We assume the entire operation is associated with a single code
    element: the sequence matcher groups operations by contiguous lines of
    code to change, so this is a safe assumption. The lookup result is
    cached on the instance.
    """
    if self._element is None:
        line = self.icached[0]
        if self.mode == "insert":
            # A newly inserted line would confuse the element finder at the
            # old location, so look up the line just before it.
            line -= 1
        self._element = self.context.module.get_element(line, 0)
    return self._element
|
def equiv(self, other):
    """Return True if ``other`` is an equivalent weighting.

    Returns
    -------
    equivalent : bool
        ``True`` if ``other`` is a `Weighting` instance with the same
        `Weighting.impl`, which yields the same result as this weighting
        for any input, ``False`` otherwise. This is checked by entry-wise
        comparison of arrays/constants.
    """
    # Fast path: equality implies equivalence.
    if self == other:
        return True
    if not isinstance(other, Weighting) or self.exponent != other.exponent:
        return False
    if isinstance(other, MatrixWeighting):
        # Delegate to the matrix weighting's (more general) comparison.
        return other.equiv(self)
    if isinstance(other, ConstWeighting):
        return np.array_equiv(self.array, other.const)
    return np.array_equal(self.array, other.array)
|
def delete_kwargs_s(cls, s, args=None, kwargs=None):
    """Delete the ``*args`` or ``**kwargs`` part from the parameters section.

    Either `args` or `kwargs` must not be None.

    Parameters
    ----------
    s : str
        The string to delete the args and kwargs from
    args : None or str
        The string for the args to delete
    kwargs : None or str
        The string for the kwargs to delete

    Notes
    -----
    The type name of `args` in `s` has to be like ````*<args>```` (i.e. the
    `args` argument preceded by a ``'*'`` and enclosed by double ``'`'``).
    Similarly, the type name of `kwargs` in `s` has to be like
    ````**<kwargs>````.
    """
    if not args and not kwargs:
        return s
    patterns = []
    if args is not None:
        patterns.append('`?`?\*%s`?`?' % args)
    if kwargs is not None:
        patterns.append('`?`?\*\*%s`?`?' % kwargs)
    return cls.delete_types_s(s, patterns)
|
def firmware_autoupgrade_params_pss(self, **kwargs):
    """Auto Generated Code"""
    config = ET.Element("config")
    firmware = ET.SubElement(config, "firmware", xmlns="urn:brocade.com:mgmt:brocade-firmware")
    params = ET.SubElement(firmware, "autoupgrade-params")
    password_node = ET.SubElement(params, "pass")
    password_node.text = kwargs.pop('pss')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def __replace_repeat(sentence):
    """Allows the use of repeating random-elements such as in the 'Ten green
    bottles' type sentences.

    Expects ``#DEFINE_REPEAT[<key>,<text>]`` definitions and
    ``#REPEAT[<key>]`` placeholders in ``sentence`` — presumably; TODO
    confirm the exact template syntax against the grammar files.

    :param sentence: template text to expand; may be None.
    """
    ###### USE SENTENCE_ID 47 for testing!
    repeat_dict = {}
    if sentence is not None:
        # Pass 1: collect each #DEFINE_REPEAT[key,text] definition into
        # repeat_dict and strip the definition from the sentence.
        while sentence.find('#DEFINE_REPEAT') != -1:
            begin_index = sentence.find('#DEFINE_REPEAT')
            # +15 skips past '#DEFINE_REPEAT[' (the 14-char tag plus '[').
            start_index = begin_index + 15
            end_index = sentence.find(']')
            if sentence.find('#DEFINE_REPEAT') is not None:
                # NOTE(review): str.find returns an int, never None, so this
                # condition is always true — presumably '!= -1' was intended;
                # harmless here since the loop guard already checked it.
                sub_list = sentence[start_index:end_index].split(',')
                choice = sub_list[0]
                repeat_text = sub_list[1]
                repeat_dict[choice] = repeat_text
                # Remove only this one definition occurrence.
                sentence = sentence.replace(sentence[begin_index:end_index + 1], '', 1)
        # Pass 2: substitute #REPEAT[key] placeholders using repeat_dict.
        while sentence.find('#REPEAT') != -1:
            if sentence.find('#REPEAT') is not None:
                # NOTE(review): always true, as above.
                repeat_begin_index = sentence.find('#REPEAT')
                # +8 skips past '#REPEAT[' (the 7-char tag plus '[').
                repeat_start_index = repeat_begin_index + 8
                # By searching from repeat_start_index below we don't
                # encounter dodgy bracket-matching errors.
                repeat_end_index = sentence.find(']', repeat_start_index)
                repeat_index = sentence[repeat_start_index:repeat_end_index]
                if repeat_index in repeat_dict:
                    # NOTE(review): if repeat_index is NOT in repeat_dict,
                    # nothing is replaced and the loop can spin forever on
                    # the same placeholder — verify inputs always define
                    # their repeat keys.
                    sentence = sentence.replace(sentence[repeat_begin_index:repeat_end_index + 1], str(repeat_dict[repeat_index]))
                if sentence.find('#REPEAT') == -1:
                    return sentence
        return sentence
    else:
        # No template at all: hand back None unchanged.
        return sentence
|
def _diff_group_position(group):
    """Generate a unified diff position line ('@@ ... @@') for a diff group."""
    old_start = group[0][0]
    new_start = group[0][1]
    old_length = 0
    new_length = 0
    for old_line, new_line, line_or_conflict in group:
        if isinstance(line_or_conflict, tuple):
            # Conflict entry: count the lines on each side separately.
            old, new = line_or_conflict
            old_length += len(old)
            new_length += len(new)
        else:
            # Shared line: contributes one line to both sides.
            old_length += 1
            new_length += 1
    # Unified diff positions are 1-based for non-empty hunks.
    if old_length:
        old_start += 1
    if new_length:
        new_start += 1
    return color.LineNumber('@@ -%s,%s +%s,%s @@' % (old_start, old_length, new_start, new_length))
|
def nl_complete_msg(sk, msg):
    """Finalize a Netlink message before sending.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L450

    Completes the message with flags and values depending on the socket
    configuration:

    - If not yet filled out, the source address of the message
      (`nlmsg_pid`) is set to the local port number of the socket.
    - If not yet specified, the next available sequence number is assigned
      to the message (`nlmsg_seq`).
    - If not yet specified, the protocol field of the message is set to
      the protocol field of the socket.
    - The `NLM_F_REQUEST` Netlink message flag is always set.
    - The `NLM_F_ACK` flag is set if Auto-ACK mode is enabled on the socket.

    Positional arguments:
    sk -- Netlink socket (nl_sock class instance).
    msg -- Netlink message (nl_msg class instance).
    """
    header = msg.nm_nlh
    if header.nlmsg_pid == NL_AUTO_PORT:
        header.nlmsg_pid = nl_socket_get_local_port(sk)
    if header.nlmsg_seq == NL_AUTO_SEQ:
        header.nlmsg_seq = sk.s_seq_next
        sk.s_seq_next += 1
    if msg.nm_protocol == -1:
        msg.nm_protocol = sk.s_proto
    header.nlmsg_flags |= NLM_F_REQUEST
    if not sk.s_flags & NL_NO_AUTO_ACK:
        header.nlmsg_flags |= NLM_F_ACK
|
def add_cpds ( self , * cpds ) :
    """Associate CPDs (Conditional Probability Distributions) with the model.

    Parameters
    ----------
    cpds : list, set, tuple (array-like)
        CPDs to attach to the model. Each must be a ``TabularCPD`` or
        ``ContinuousFactor`` whose scope is a subset of the model's nodes.
        A CPD for a variable that already has one replaces the old CPD
        (with a warning).

    Examples
    --------
    >>> from pgmpy.models import BayesianModel
    >>> from pgmpy.factors.discrete.CPD import TabularCPD
    >>> student = BayesianModel([('diff', 'grades'), ('intel', 'grades')])
    >>> grades_cpd = TabularCPD('grades', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
    ...                                       [0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
    ...                                       [0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
    ...                         evidence=['diff', 'intel'], evidence_card=[2, 3])
    >>> student.add_cpds(grades_cpd)
    """
    for cpd in cpds :
        if not isinstance ( cpd , ( TabularCPD , ContinuousFactor ) ) :
            raise ValueError ( 'Only TabularCPD or ContinuousFactor can be added.' )
        # Every variable in the CPD's scope must exist in the model.
        if set ( cpd . scope ( ) ) - set ( self . nodes ( ) ) :
            raise ValueError ( 'CPD defined on variable not in the model' , cpd )
        # Replace an existing CPD for the same variable, else append.
        for position , existing_cpd in enumerate ( self . cpds ) :
            if existing_cpd . variable == cpd . variable :
                logging . warning ( "Replacing existing CPD for {var}" . format ( var = cpd . variable ) )
                self . cpds [ position ] = cpd
                break
        else :
            self . cpds . append ( cpd )
|
def shutdown ( self , force = False ) :
    """Shut down and power off the HMC represented by this Console object.

    While the HMC is powered off, Python resource objects retrieved from it
    may raise exceptions upon further use. Once the HMC is available again
    (e.g. powered on locally), those objects can continue to be used; an
    automatic re-logon is performed under the covers because the HMC startup
    invalidates the current session.

    Authorization requirements:
    * Task permission for the "Shutdown/Restart" task.
    * "Remote Shutdown" must be enabled on the HMC.

    Parameters:
      force (bool):
        Whether the shutdown proceeds while local or remote GUI users are
        connected (`True`) or not (`False`). HMC WS API clients do not count
        as users here.

    Raises:
      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
    """
    operation_uri = self . uri + '/operations/shutdown'
    payload = { 'force' : force }
    self . manager . session . post ( operation_uri , body = payload )
|
def write ( self , buffer = bytes ( ) , address = 0 , count = 0 ) :
    """Writes the content of the *buffer* to the :attr:`cache` beginning
    at the start *address*.

    :param bytes buffer: content to write.
    :param int address: start address within the cache.
    :param int count: number of bytes to write to the cache; when 0 (the
        default) the whole *buffer* is written.
    :raises ValueError: if *count* does not match ``len(buffer)`` or the
        write does not fit into the cache.
    """
    if not count :
        # BUG FIX: with the old default (count=0) any non-empty buffer
        # raised ValueError, because a len(buffer) sequence cannot be
        # assigned to a zero-length memoryview slice. Default to writing
        # the entire buffer instead.
        count = len ( buffer )
    view = memoryview ( self . _cache )
    view [ address : address + count ] = buffer
|
def main ( ) :
    """Provide the entry point for the modutils command.

    Parses command-line options, then dispatches to a ModUtils instance for
    the requested subreddit: adding users to moderator categories, listing
    categories, managing flair, syncing flair templates, and messaging users.
    """
    # User categories that --add / --list / --message operate on.
    mod_choices = ( 'banned' , 'contributor' , 'moderator' )
    mod_choices_dsp = ', ' . join ( [ '`{}`' . format ( x ) for x in mod_choices ] )
    # Help text for each command-line option, keyed by option name.
    msg = { 'add' : ( 'Add users to one of the following categories: {}' . format ( mod_choices_dsp ) ) , 'clear' : 'Remove users who have no flair set.' , 'css' : 'Ignore the CSS field when synchronizing flair.' , 'edit' : 'When adding flair templates, mark them as editable.' , 'file' : 'The file containing contents for --message' , 'flair' : 'List flair for the subreddit.' , 'flair_stats' : 'Display the number of users with each flair.' , 'json' : 'Output the results as json. Applies to --flair' , 'limit' : ( 'The minimum number of users that must have the specified ' 'flair in order to add as a template. default: %default' ) , 'list' : ( 'List the users in one of the following categories: ' '{}. May be specified more than once.' . format ( mod_choices_dsp ) ) , 'msg' : ( 'Send message to users of one of the following categories: ' '{}. Message subject provided via --subject, content provided ' 'via --file or STDIN.' ) . format ( mod_choices_dsp ) , 'sort' : ( 'The order to add flair templates. Available options are ' '`alpha` to add alphabetically, and `size` to first add ' 'flair that is shared by the most number of users. ' 'default: %default' ) , 'static' : ( 'Add this template when syncing flair templates. When ' 'syncing text and css use a comma to separate the two.' ) , 'subject' : 'The subject of the message to send for --message.' , 'sync' : 'Synchronize flair templates with current user flair.' , 'text' : 'Ignore the text field when synchronizing flair.' }
    usage = 'Usage: %prog [options] SUBREDDIT'
    # Build the option parser: general options first, then grouped
    # format and sync options.
    parser = arg_parser ( usage = usage )
    parser . add_option ( '-a' , '--add' , help = msg [ 'add' ] )
    parser . add_option ( '-l' , '--list' , action = 'append' , help = msg [ 'list' ] , choices = mod_choices , metavar = 'CATEGORY' , default = [ ] )
    parser . add_option ( '-c' , '--clear-empty' , action = 'store_true' , help = msg [ 'clear' ] )
    parser . add_option ( '-F' , '--file' , help = msg [ 'file' ] )
    parser . add_option ( '-f' , '--flair' , action = 'store_true' , help = msg [ 'flair' ] )
    parser . add_option ( '' , '--flair-stats' , action = 'store_true' , help = msg [ 'flair_stats' ] )
    parser . add_option ( '-m' , '--message' , choices = mod_choices , help = msg [ 'msg' ] )
    parser . add_option ( '' , '--subject' , help = msg [ 'subject' ] )
    group = OptionGroup ( parser , 'Format options' )
    group . add_option ( '-j' , '--json' , action = 'store_true' , help = msg [ 'json' ] )
    parser . add_option_group ( group )
    group = OptionGroup ( parser , 'Sync options' )
    group . add_option ( '' , '--sync' , action = 'store_true' , help = msg [ 'sync' ] )
    group . add_option ( '-s' , '--static' , action = 'append' , help = msg [ 'static' ] )
    group . add_option ( '' , '--editable' , action = 'store_true' , help = msg [ 'edit' ] )
    group . add_option ( '' , '--ignore-css' , action = 'store_true' , default = False , help = msg [ 'css' ] )
    group . add_option ( '' , '--ignore-text' , action = 'store_true' , default = False , help = msg [ 'text' ] )
    group . add_option ( '' , '--limit' , type = 'int' , help = msg [ 'limit' ] , default = 2 )
    group . add_option ( '' , '--sort' , action = 'store' , choices = ( 'alpha' , 'size' ) , default = 'alpha' , help = msg [ 'sort' ] )
    parser . add_option_group ( group )
    options , args = parser . parse_args ( )
    # Validate: exactly one positional arg (the subreddit), and --message
    # requires --subject.
    if len ( args ) == 0 :
        parser . error ( 'Must provide subreddit name.' )
    if options . message and not options . subject :
        parser . error ( 'Must provide --subject when providing --message.' )
    subreddit = args [ 0 ]
    check_for_updates ( options )
    modutils = ModUtils ( subreddit , options . site , options . verbose )
    # Dispatch each requested action; multiple actions may run in one call.
    if options . add :
        modutils . add_users ( options . add )
    if options . clear_empty :
        modutils . clear_empty ( )
    for category in options . list :
        modutils . output_list ( category )
    if options . flair :
        modutils . output_current_flair ( as_json = options . json )
    if options . flair_stats :
        modutils . output_flair_stats ( )
    if options . sync :
        modutils . flair_template_sync ( editable = options . editable , limit = options . limit , static = options . static , sort = options . sort , use_css = not options . ignore_css , use_text = not options . ignore_text )
    if options . message :
        modutils . message ( options . message , options . subject , options . file )
|
def ciphertext_length ( header , plaintext_length ) :
    """Calculates the complete ciphertext message length, given a complete header.

    :param header: Complete message header object
    :type header: aws_encryption_sdk.structures.MessageHeader
    :param int plaintext_length: Length of plaintext in bytes
    :rtype: int
    """
    # Total message size is header + framed/unframed body + footer.
    return (
        header_length ( header )
        + body_length ( header , plaintext_length )
        + footer_length ( header )
    )
|
def _submit_bundle ( cmd_args , app ) :
    """Submit an existing bundle to the Streaming Analytics service.

    :param cmd_args: parsed command-line arguments; ``service_name`` selects
        the target service instance.
    :param app: bundle wrapper whose ``app`` attribute holds the bundle and
        whose ``cfg`` holds the job configuration.
    :returns: the submission result dict, with ``return_code`` added
        (0 on success, 1 on failure).
    """
    sac = streamsx . rest . StreamingAnalyticsConnection ( service_name = cmd_args . service_name )
    sas = sac . get_streaming_analytics ( )
    sr = sas . submit_job ( bundle = app . app , job_config = app . cfg [ ctx . ConfigParams . JOB_CONFIG ] )
    # Pessimistic default: a response shape we do not recognize is a failure.
    # (Previously rc was left unbound in that case, causing a NameError.)
    rc = 1
    if 'exception' in sr :
        rc = 1
    elif 'status_code' in sr :
        try :
            # BUG FIX: the original computed int(sr['status_code'] == 200),
            # coercing the comparison result instead of the status code.
            rc = 0 if int ( sr [ 'status_code' ] ) == 200 else 1
        except ( TypeError , ValueError ) :
            # Non-numeric status code -- treat as failure.
            rc = 1
    elif 'id' in sr or 'jobId' in sr :
        rc = 0
    sr [ 'return_code' ] = rc
    return sr
|
def put_tagging ( Bucket , region = None , key = None , keyid = None , profile = None , ** kwargs ) :
    '''Given a valid config, update the tags for a bucket.
    Returns {updated: true} if tags were updated and returns
    {updated: False} if tags were not updated.
    CLI Example:
    .. code-block:: bash
        salt myminion boto_s3_bucket.put_tagging my_bucket my_role [...]'''
    try :
        conn = _get_conn ( region = region , key = key , keyid = keyid , profile = profile )
        # Every keyword argument becomes a tag, except internal dunder args
        # that Salt injects into the call.
        tagslist = [
            { 'Key' : six . text_type ( k ) , 'Value' : six . text_type ( v ) }
            for k , v in six . iteritems ( kwargs )
            if not six . text_type ( k ) . startswith ( '__' )
        ]
        conn . put_bucket_tagging ( Bucket = Bucket , Tagging = { 'TagSet' : tagslist , } )
        return { 'updated' : True , 'name' : Bucket }
    except ClientError as e :
        return { 'updated' : False , 'error' : __utils__ [ 'boto3.get_error' ] ( e ) }
|
def assemble ( self , roboset = None , color = None , format = None , bgset = None , sizex = 300 , sizey = 300 ) :
    """Build our Robot!
    Returns the robot image itself.

    :param roboset: robot part set to use; 'any' picks one from the hash,
        an unknown value falls back to the first set.
    :param color: optional color name; only honored for 'set1'.
    :param format: output image format; defaults to :attr:`format`.
    :param bgset: background set name, 'any' for a hash-based choice, or
        None for no background.
    :param sizex: output width in pixels.
    :param sizey: output height in pixels.
    """
    # Allow users to manually specify a robot 'set' that they like.
    # Ensure that this is one of the allowed choices, or allow all
    # If they don't set one, take the first entry from sets above.
    if roboset == 'any' :
        roboset = self . sets [ self . hasharray [ 1 ] % len ( self . sets ) ]
    elif roboset in self . sets :
        roboset = roboset
    else :
        roboset = self . sets [ 0 ]
    # Only set1 is setup to be color-seletable. The others don't have enough pieces in various colors.
    # This could/should probably be expanded at some point..
    # Right now, this feature is almost never used. (It was <44 requests this year, out of 78M reqs)
    if roboset == 'set1' :
        if color in self . colors :
            roboset = 'set1/' + color
        else :
            # No (valid) color requested: derive one from the hash so the
            # same input always yields the same color.
            randomcolor = self . colors [ self . hasharray [ 0 ] % len ( self . colors ) ]
            roboset = 'set1/' + randomcolor
    # If they specified a background, ensure it's legal, then give it to them.
    # NOTE(review): an unknown, non-'any' bgset value is kept as-is and will
    # make os.listdir fail below -- confirm callers validate bgset upstream.
    if bgset in self . bgsets :
        bgset = bgset
    elif bgset == 'any' :
        bgset = self . bgsets [ self . hasharray [ 2 ] % len ( self . bgsets ) ]
    # If we set a format based on extension earlier, use that. Otherwise, PNG.
    if format is None :
        format = self . format
    # Each directory in our set represents one piece of the Robot, such as the eyes, nose, mouth, etc.
    # Each directory is named with two numbers - The number before the # is the sort order.
    # This ensures that they always go in the same order when choosing pieces, regardless of OS.
    # The second number is the order in which to apply the pieces.
    # For instance, the head has to go down BEFORE the eyes, or the eyes would be hidden.
    # First, we'll get a list of parts of our robot.
    roboparts = self . _get_list_of_files ( self . resourcedir + 'sets/' + roboset )
    # Now that we've sorted them by the first number, we need to sort each sub-category by the second.
    roboparts . sort ( key = lambda x : x . split ( "#" ) [ 1 ] )
    if bgset is not None :
        bglist = [ ]
        backgrounds = natsort . natsorted ( os . listdir ( self . resourcedir + 'backgrounds/' + bgset ) )
        # NOTE(review): this plain .sort() re-sorts lexicographically and
        # overrides the natural sort just applied above -- presumably
        # unintended; confirm before changing, as it affects which
        # background a given hash selects.
        backgrounds . sort ( )
        for ls in backgrounds :
            if not ls . startswith ( "." ) :
                bglist . append ( self . resourcedir + 'backgrounds/' + bgset + "/" + ls )
        background = bglist [ self . hasharray [ 3 ] % len ( bglist ) ]
    # Paste in each piece of the Robot.
    roboimg = Image . open ( roboparts [ 0 ] )
    roboimg = roboimg . resize ( ( 1024 , 1024 ) )
    for png in roboparts :
        img = Image . open ( png )
        img = img . resize ( ( 1024 , 1024 ) )
        # Third argument is the alpha mask, so transparent regions of each
        # part do not erase what is already underneath.
        roboimg . paste ( img , ( 0 , 0 ) , img )
    # If we're a BMP, flatten the image.
    if format == 'bmp' : # Flatten bmps
        r , g , b , a = roboimg . split ( )
        roboimg = Image . merge ( "RGB" , ( r , g , b ) )
    if bgset is not None :
        bg = Image . open ( background )
        bg = bg . resize ( ( 1024 , 1024 ) )
        bg . paste ( roboimg , ( 0 , 0 ) , roboimg )
        roboimg = bg
    self . img = roboimg . resize ( ( sizex , sizey ) , Image . ANTIALIAS )
    self . format = format
|
def on_clipboard_mode_change ( self , clipboard_mode ) :
    """Notify listeners that the shared clipboard mode changed.

    in clipboard_mode of type :class:`ClipboardMode`
        The new shared clipboard mode.
    """
    if isinstance ( clipboard_mode , ClipboardMode ) :
        self . _call ( "onClipboardModeChange" , in_p = [ clipboard_mode ] )
    else :
        raise TypeError ( "clipboard_mode can only be an instance of type ClipboardMode" )
|
def poll ( self , id ) :
    """Poll with a given id.

    Parameters
    ----------
    id : int
        Poll id.

    Returns
    -------
    an :class:`ApiQuery` of :class:`Poll`

    Raises
    ------
    :class:`NotFound`
        If a poll with the requested id doesn't exist.
    """
    @ api_query ( 'poll' , pollid = str ( id ) )
    async def result ( _ , root ) :
        elem = root . find ( 'POLL' )
        # BUG FIX: ElementTree elements are falsy when they have no
        # children, so ``if not elem`` could reject a poll element that
        # exists but happens to be empty. Test explicitly for "not found".
        if elem is None :
            raise NotFound ( f'No poll found with id {id}' )
        return Poll ( elem )
    return result ( self )
|
def Nu_vertical_cylinder_Al_Arabi_Khamis ( Pr , Gr , L , D , turbulent = None ) :
    r'''Calculates Nusselt number for natural convection around a vertical
    isothermal cylinder according to Al-Arabi and Khamis [1]_, also as
    presented in [2]_ and [3]_.

    .. math::
        Nu_H = 2.9Ra_H^{0.25}/Gr_D^{1/12},
        \; 9.88 \times 10^7 \le Ra_H \le 2.7\times10^{9}

        Nu_H = 0.47 Ra_H^{0.333}/Gr_D^{1/12},
        \; 2.7 \times 10^9 \le Ra_H \le 2.95\times10^{10}

    Parameters
    ----------
    Pr : float
        Prandtl number [-]
    Gr : float
        Grashof number with respect to cylinder height [-]
    L : float
        Length of vertical cylinder, [m]
    D : float
        Diameter of cylinder, [m]
    turbulent : bool or None, optional
        Forces the turbulent correlation when True and the laminar one when
        False; leave as None for automatic selection based on Ra.

    Returns
    -------
    Nu : float
        Nusselt number, [-]

    Notes
    -----
    For air. D from 12.75 to 51 mm; H from 300 to 2000 mm. No warning is
    given outside the correlated range
    :math:`1.08 \times 10^4 \le Gr_D \le 6.9 \times 10^5`.

    Examples
    --------
    >>> Nu_vertical_cylinder_Al_Arabi_Khamis(.71, 2E10, 10, 1)
    280.39793209114765

    References
    ----------
    .. [1] Al-Arabi, M., and M. Khamis. "Natural Convection Heat Transfer
       from Inclined Cylinders." International Journal of Heat and Mass
       Transfer 25, no. 1 (January 1982): 3-15.
    .. [2] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
       Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
       (June 1, 2008): 521-36.
    .. [3] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
       Vertical Cylinders." In Natural Convection from Circular Cylinders,
       23-42. Springer, 2014.
    '''
    # Diameter-based Grashof number, rescaled from the height-based one.
    Gr_D = Gr / L ** 3 * D ** 3
    Ra = Pr * Gr
    # The caller may force a regime; otherwise switch on the Rayleigh number.
    use_turbulent = turbulent if turbulent is not None else Ra > 2.6E9
    if use_turbulent :
        return 0.47 * Ra ** ( 1 / 3. ) * Gr_D ** ( - 1 / 12. )
    return 2.9 * Ra ** 0.25 * Gr_D ** ( - 1 / 12. )
|
def expire_hit ( self , hit_id ) :
    """Expire a HIT, which will change its status to "Reviewable",
    allowing it to be deleted.

    Returns True on success; wraps any MTurk API failure in a
    MTurkServiceException.
    """
    try :
        # Setting the expiration to the epoch expires the HIT immediately.
        self . mturk . update_expiration_for_hit ( HITId = hit_id , ExpireAt = 0 )
    except Exception as ex :
        detail = "Failed to expire HIT {}: {}" . format ( hit_id , str ( ex ) )
        raise MTurkServiceException ( detail )
    else :
        return True
|
def setCurveModel ( self , model ) :
    """Sets the stimulus model for the calibration curve test

    :param model: Stimulus model that has a tone curve configured
    :type model: :class:`StimulusModel<sparkle.stim.stimulus_model.StimulusModel>`
    """
    # Keep a reference on the controller, then hand the model to the
    # curve widget so the UI reflects it.
    self . stimModel = model
    self . ui . curveWidget . setModel ( model )
|
def parse_section_extras_require ( self , section_options ) :
    """Parses `extras_require` configuration file section.

    :param dict section_options:
    """
    # Each extra's value is a ';'-separated requirement list.
    self [ 'extras_require' ] = self . _parse_section_to_dict (
        section_options , partial ( self . _parse_list , separator = ';' ) )
|
def format_listeners ( elb_settings = None , env = 'dev' , region = 'us-east-1' ) :
    """Format ELB Listeners into standard list.
    Args:
        elb_settings (dict): ELB settings including ELB Listeners to add,
            e.g.::
                # old
                "certificate": null,
                "i_port": 8080,
                "lb_port": 80,
                "subnet_purpose": "internal",
                "target": "HTTP:8080/health"
                # new
                "ports": [
                    "instance": "HTTP:8080",
                    "loadbalancer": "HTTP:80"
                    "certificate": "cert_name",
                    "instance": "HTTP:8443",
                    "loadbalancer": "HTTPS:443"
                "subnet_purpose": "internal",
                "target": "HTTP:8080/health"
        env (str): Environment to find the Account Number for.
    Returns:
        list: ELB Listeners formatted into dicts for Spinnaker::
            'externalPort': 80,
            'externalProtocol': 'HTTP',
            'internalPort': 8080,
            'internalProtocol': 'HTTP',
            'sslCertificateId': None,
            'listenerPolicies': [],
            'backendPolicies': []
    """
    LOG . debug ( 'ELB settings:\n%s' , elb_settings )
    # The account id is needed to fully qualify certificate names.
    credential = get_env_credential ( env = env )
    account = credential [ 'accountId' ]
    listeners = [ ]
    if 'ports' in elb_settings :
        # New-style settings: one entry per listener under 'ports', each with
        # "PROTO:PORT" strings for both sides.
        for listener in elb_settings [ 'ports' ] :
            cert_name = format_cert_name ( env = env , region = region , account = account , certificate = listener . get ( 'certificate' , None ) )
            lb_proto , lb_port = listener [ 'loadbalancer' ] . split ( ':' )
            i_proto , i_port = listener [ 'instance' ] . split ( ':' )
            # 'policies' is the legacy key for listener policies; merge both.
            listener_policies = listener . get ( 'policies' , [ ] )
            listener_policies += listener . get ( 'listener_policies' , [ ] )
            backend_policies = listener . get ( 'backend_policies' , [ ] )
            elb_data = { 'externalPort' : int ( lb_port ) , 'externalProtocol' : lb_proto . upper ( ) , 'internalPort' : int ( i_port ) , 'internalProtocol' : i_proto . upper ( ) , 'sslCertificateId' : cert_name , 'listenerPolicies' : listener_policies , 'backendPolicies' : backend_policies , }
            listeners . append ( elb_data )
    else :
        # Old-style flat settings produce exactly one listener.
        # NOTE(review): this branch reads 'lb_proto'/'i_proto' keys and does
        # not uppercase them or format the certificate name -- presumably the
        # legacy config already provides them in final form; confirm.
        listener_policies = elb_settings . get ( 'policies' , [ ] )
        listener_policies += elb_settings . get ( 'listener_policies' , [ ] )
        backend_policies = elb_settings . get ( 'backend_policies' , [ ] )
        listeners = [ { 'externalPort' : int ( elb_settings [ 'lb_port' ] ) , 'externalProtocol' : elb_settings [ 'lb_proto' ] , 'internalPort' : int ( elb_settings [ 'i_port' ] ) , 'internalProtocol' : elb_settings [ 'i_proto' ] , 'sslCertificateId' : elb_settings [ 'certificate' ] , 'listenerPolicies' : listener_policies , 'backendPolicies' : backend_policies , } ]
    for listener in listeners :
        LOG . info ( 'ELB Listener:\n' 'loadbalancer %(externalProtocol)s:%(externalPort)d\n' 'instance %(internalProtocol)s:%(internalPort)d\n' 'certificate: %(sslCertificateId)s\n' 'listener_policies: %(listenerPolicies)s\n' 'backend_policies: %(backendPolicies)s' , listener )
    return listeners
|
def p_iteration_statement_4 ( self , p ) :
    """iteration _ statement : FOR LPAREN left _ hand _ side _ expr IN expr RPAREN statement"""
    # NOTE: the docstring above is the PLY grammar production for this rule;
    # it is consumed by the parser generator and must not be edited as prose.
    # Build a for-in AST node: p[3] is the loop target, p[5] the iterable,
    # p[7] the loop body statement.
    p [ 0 ] = self . asttypes . ForIn ( item = p [ 3 ] , iterable = p [ 5 ] , statement = p [ 7 ] )
    p [ 0 ] . setpos ( p )
|
def parse_binary ( self , data , display , rawdict = 0 ) :
    """values, remdata = s.parse_binary(data, display, rawdict = 0)

    Convert a binary representation of the structure into Python values.
    DATA is a string or a buffer containing the binary data.
    DISPLAY should be a Xlib.protocol.display.Display object if
    there are any Resource fields or Lists with ResourceObjs.
    The Python values are returned as VALUES. If RAWDICT is true,
    a Python dictionary is returned, where the keys are field
    names and the values are the corresponding Python value. If
    RAWDICT is false, a DictWrapper will be returned where all
    fields are available as attributes.
    REMDATA are the remaining binary data, unused by the Struct object.
    """
    ret = { }
    # Unpack the fixed-size prefix in one struct call; individual field
    # values are then picked out of ``val`` by index (vno).
    val = struct . unpack ( self . static_codes , data [ : self . static_size ] )
    # Collected length/format values, keyed by the *variable* field name
    # they describe; consumed when parsing the variable-size tail below.
    lengths = { }
    formats = { }
    vno = 0
    for f in self . static_fields : # Fields without name should be ignored. This is typically
        # pad and constant fields
        if not f . name :
            pass
        # Store index in val for Length and Format fields, to be used
        # when treating varfields.
        elif isinstance ( f , LengthField ) :
            # A LengthField may describe several variable fields; record the
            # (possibly transformed) value under each of their names.
            f_names = [ f . name ]
            if f . other_fields :
                f_names . extend ( f . other_fields )
            field_val = val [ vno ]
            if f . parse_value is not None :
                field_val = f . parse_value ( field_val , display )
            for f_name in f_names :
                lengths [ f_name ] = field_val
        elif isinstance ( f , FormatField ) :
            formats [ f . name ] = val [ vno ]
        # Treat value fields the same was as in parse_value.
        else :
            # A field may occupy one or several struct values; slice
            # accordingly before any per-field post-processing.
            if f . structvalues == 1 :
                field_val = val [ vno ]
            else :
                field_val = val [ vno : vno + f . structvalues ]
            if f . parse_value is not None :
                field_val = f . parse_value ( field_val , display )
            ret [ f . name ] = field_val
        # Advance past however many struct values this field consumed.
        vno = vno + f . structvalues
    data = data [ self . static_size : ]
    # Call parse_binary_value for each var_field, passing the
    # length and format values from the unpacked val.
    for f in self . var_fields :
        ret [ f . name ] , data = f . parse_binary_value ( data , display , lengths . get ( f . name ) , formats . get ( f . name ) , )
    if not rawdict :
        ret = DictWrapper ( ret )
    return ret , data
|
def clearDay ( self , date ) :
    """Remove all stored information about this date (meals or closed
    information).

    :param date: Date of the day
    :type date: datetime.date
    """
    normalized = self . _handleDate ( date )
    # pop with a default silently ignores dates that were never stored.
    self . _days . pop ( normalized , None )
|
def _status_code_check ( self , response : Dict [ str , Any ] ) :
    """Dispatch a decoded response according to its status code.

    Code ranges (translated from the original Chinese documentation):

    + 500-599: server-side error; the corresponding exception is raised.
    + 400-499: call error; the exception is set on the matching future.
    + 300-399: warning; emitted before normal success handling.
    + 200-399: success; the result is set on the matching future.
    + 100-199: server messages (authentication and heartbeat responses).

    Parameters:
        response (Dict[str, Any]): decoded response payload.

    Return:
        (bool): True for any response that is not a server error.
    """
    code = response . get ( "CODE" )
    if self . debug :
        print ( "resv:{}" . format ( response ) )
        print ( code )
    if code >= 500 :
        if self . debug :
            print ( "server error" )
        return self . _server_error_handler ( code )
    if 400 <= code < 500 :
        if self . debug :
            print ( "call method error" )
        return self . _method_error_handler ( response )
    if 200 <= code < 400 :
        if code >= 300 :
            self . _warning_handler ( code )
        # Only these specific codes carry a method result to deliver.
        if code in ( 200 , 201 , 202 , 206 , 300 , 301 ) :
            if self . debug is True :
                print ( "resv resp {}" . format ( response ) )
            return self . _method_response_handler ( response )
    elif 100 <= code < 200 :
        return self . _server_response_handler ( response )
    else :
        raise MprpcException ( "unknow status code {}" . format ( code ) )
|
def timedelta_to_str ( aTimedelta ) :
    """Render a datetime.timedelta as ``'D HH:MM:SS'``: the day count,
    a space, then zero-padded hours, minutes and seconds."""
    # timedelta normalizes .seconds into [0, 86399], so divmod is exact.
    hours , remainder = divmod ( aTimedelta . seconds , 3600 )
    minutes , seconds = divmod ( remainder , 60 )
    return '%d %02d:%02d:%02d' % ( aTimedelta . days , hours , minutes , seconds )
|
def _download_subs ( self , download_link , videofile , referer = '' , sub_title = '' ) :
    """Download a subtitle file next to the video file.

    videofile: path of the video file
    sub_title: subtitle title (file name)
    download_link: download URL
    referer: referer header value

    Returns a tuple of (downloaded file path, final referer URL).
    """
    root = os . path . dirname ( videofile )
    name , _ = os . path . splitext ( os . path . basename ( videofile ) )
    ext = ''
    headers = { 'Referer' : referer }
    res = self . session . get ( download_link , headers = headers , stream = True )
    referer = res . url
    # Try to get the file extension from the Content-Disposition header.
    content_disposition = res . headers . get ( 'Content-Disposition' , '' )
    if content_disposition :
        _ , params = cgi . parse_header ( content_disposition )
        filename = params . get ( 'filename' )
        if filename :
            _ , ext = os . path . splitext ( filename )
            ext = ext [ 1 : ]
    if ext == '' : # Fall back: take the extension from the final URL path.
        p = urlparse . urlparse ( res . url )
        path = p . path
        if path :
            _ , ext = os . path . splitext ( path )
            ext = ext [ 1 : ]
    if ext == '' : # Last resort: take the extension from the subtitle title.
        _ , ext = os . path . splitext ( sub_title )
        ext = ext [ 1 : ]
    # The subtitle keeps the video's base name, with the detected extension.
    filename = '{}.{}' . format ( name , ext )
    filepath = os . path . join ( root , filename )
    with open ( filepath , 'wb' ) as fp :
        for chunk in res . iter_content ( 8192 ) :
            fp . write ( chunk )
    return filepath , referer
|
def _ellipsoid_phantom_3d ( space , ellipsoids ) :
    """Create an ellipsoid phantom in 3d space.

    Parameters
    ----------
    space : `DiscreteLp`
        Space in which the phantom should be generated. If ``space.shape`` is
        1 in an axis, a corresponding slice of the phantom is created
        (instead of squashing the whole phantom into the slice).
    ellipsoids : list of lists
        Each row should contain the entries::
            'value',
            'axis_1', 'axis_2', 'axis_3',
            'center_x', 'center_y', 'center_z',
            'rotation_phi', 'rotation_theta', 'rotation_psi'
        The provided ellipsoids need to be specified relative to the
        reference cube ``[-1, -1, -1] x [1, 1, 1]``. Angles are to be given
        in radians.

    Returns
    -------
    phantom : ``space`` element
        3D ellipsoid phantom in ``space``.

    See Also
    --------
    shepp_logan : The typical use-case for this function.
    """
    # Blank volume
    p = np . zeros ( space . shape , dtype = space . dtype )
    minp = space . grid . min_pt
    maxp = space . grid . max_pt
    # Create the pixel grid
    grid_in = space . grid . meshgrid
    # Move points to [-1, 1]
    grid = [ ]
    for i in range ( 3 ) :
        mean_i = ( minp [ i ] + maxp [ i ] ) / 2.0
        # Where space.shape = 1, we have minp = maxp, so we set diff_i = 1
        # to avoid division by zero. Effectively, this allows constructing
        # a slice of a 3D phantom.
        diff_i = ( maxp [ i ] - minp [ i ] ) / 2.0 or 1.0
        grid . append ( ( grid_in [ i ] - mean_i ) / diff_i )
    for ellip in ellipsoids :
        # Exactly 10 parameters per ellipsoid (see docstring).
        assert len ( ellip ) == 10
        intensity = ellip [ 0 ]
        a_squared = ellip [ 1 ] ** 2
        b_squared = ellip [ 2 ] ** 2
        c_squared = ellip [ 3 ] ** 2
        x0 = ellip [ 4 ]
        y0 = ellip [ 5 ]
        z0 = ellip [ 6 ]
        phi = ellip [ 7 ]
        theta = ellip [ 8 ]
        psi = ellip [ 9 ]
        # Inverse squared semi-axes; a point is inside the ellipsoid when
        # the scaled squared distance from the center is <= 1.
        scales = [ 1 / a_squared , 1 / b_squared , 1 / c_squared ]
        # Center mapped from [-1, 1] coordinates to [0, 1] relative position.
        center = ( np . array ( [ x0 , y0 , z0 ] ) + 1.0 ) / 2.0
        # Create the offset x, y and z values for the grid
        if any ( [ phi , theta , psi ] ) : # Rotate the points to the expected coordinate system.
            cphi = np . cos ( phi )
            sphi = np . sin ( phi )
            ctheta = np . cos ( theta )
            stheta = np . sin ( theta )
            cpsi = np . cos ( psi )
            spsi = np . sin ( psi )
            # Euler-angle rotation matrix built from (phi, theta, psi).
            mat = np . array ( [ [ cpsi * cphi - ctheta * sphi * spsi , cpsi * sphi + ctheta * cphi * spsi , spsi * stheta ] , [ - spsi * cphi - ctheta * sphi * cpsi , - spsi * sphi + ctheta * cphi * cpsi , cpsi * stheta ] , [ stheta * sphi , - stheta * cphi , ctheta ] ] )
            # Calculate the points that could possibly be inside the volume
            # Since the points are rotated, we cannot do anything directional
            # without more logic
            max_radius = np . sqrt ( np . abs ( mat ) . dot ( [ a_squared , b_squared , c_squared ] ) )
            idx , shapes = _getshapes_3d ( center , max_radius , space . shape )
            subgrid = [ g [ idi ] for g , idi in zip ( grid , shapes ) ]
            offset_points = [ vec * ( xi - x0i ) [ ... , None ] for xi , vec , x0i in zip ( subgrid , mat . T , [ x0 , y0 , z0 ] ) ]
            rotated = offset_points [ 0 ] + offset_points [ 1 ] + offset_points [ 2 ]
            np . square ( rotated , out = rotated )
            radius = np . dot ( rotated , scales )
        else : # Calculate the points that could possibly be inside the volume
            max_radius = np . sqrt ( [ a_squared , b_squared , c_squared ] )
            idx , shapes = _getshapes_3d ( center , max_radius , space . shape )
            subgrid = [ g [ idi ] for g , idi in zip ( grid , shapes ) ]
            squared_dist = [ ai * ( xi - x0i ) ** 2 for xi , ai , x0i in zip ( subgrid , scales , [ x0 , y0 , z0 ] ) ]
            # Parentheses to get best order for broadcasting
            radius = squared_dist [ 0 ] + ( squared_dist [ 1 ] + squared_dist [ 2 ] )
        # Find the points within the ellipse
        inside = radius <= 1
        # Add the ellipse intensity to those points
        # NOTE(review): this in-place update relies on ``idx`` being a tuple
        # of slices (basic indexing), so ``p[idx]`` is a view; if
        # _getshapes_3d returned index arrays this would modify a copy and
        # silently do nothing -- confirm against _getshapes_3d.
        p [ idx ] [ inside ] += intensity
    return space . element ( p )
|
def updateNewsBulletin ( self , msgId , msgType , newsMessage , originExch ) :
    """updateNewsBulletin ( EWrapper self , int msgId , int msgType , IBString const & newsMessage , IBString const & originExch )"""
    # SWIG-generated passthrough: delegates the news-bulletin callback to
    # the native EWrapper implementation.
    return _swigibpy . EWrapper_updateNewsBulletin ( self , msgId , msgType , newsMessage , originExch )
|
def into_hold ( self , name , obj ) :
    """Add data into the a storage area provided by the framework.

    Note: The data is stored with the thread local instance.

    :param name: name of the data to be stored
    :param obj: data to be stored
    :returns: N/A
    :raises: N/A
    """
    message = ( 'StackInABox({0}): Holding onto {1} of type {2} '
               'with id {3}' ) . format ( self . __id , name , type ( obj ) , id ( obj ) )
    logger . debug ( message )
    self . holds [ name ] = obj
|
def api_stop ( server_state ) :
    """Stop the global API server thread"""
    api_srv = server_state [ 'api' ]
    if api_srv is None :
        # Nothing running; already stopped and joined earlier.
        log . info ( "API already joined" )
    else :
        log . info ( "Shutting down API" )
        api_srv . stop_server ( )
        api_srv . join ( )
        log . info ( "API server joined" )
    # Drop the reference either way so a later stop is a no-op.
    server_state [ 'api' ] = None
|
def get_size ( self , value = None ) :
    """Return the action length including the padding (multiple of 8)."""
    if isinstance ( value , ActionHeader ) :
        return value . get_size ( )
    if value is None :
        # Round the base size up to the next multiple of 8 bytes.
        return ceil ( super ( ) . get_size ( ) / 8 ) * 8
    raise ValueError ( f'Invalid value "{value}" for Action*.get_size()' )
|
def fragment6 ( pkt , fragSize ) :
    """Performs fragmentation of an IPv6 packet. Provided packet ('pkt') must
    already contain an IPv6ExtHdrFragment() class. 'fragSize' argument is the
    expected maximum size of fragments (MTU). The list of packets is returned.
    If packet does not contain an IPv6ExtHdrFragment class, it is returned in
    result list.
    """
    # Work on a copy so the caller's packet is never mutated.
    pkt = pkt . copy ( )
    if IPv6ExtHdrFragment not in pkt : # TODO : automatically add a fragment before upper Layer
        # at the moment, we do nothing and return initial packet
        # as single element of a list
        return [ pkt ]
    # If the payload is bigger than 65535, a Jumbo payload must be used, as
    # an IPv6 packet can't be bigger than 65535 bytes.
    if len ( raw ( pkt [ IPv6ExtHdrFragment ] ) ) > 65535 :
        warning ( "An IPv6 packet can'be bigger than 65535, please use a Jumbo payload." )
        # noqa: E501
        return [ ]
    s = raw ( pkt )
    # for instantiation to get upper layer checksum right
    if len ( s ) <= fragSize :
        # Already fits in one fragment; nothing to do.
        return [ pkt ]
    # Fragmentable part: fake IPv6 for Fragmentable part length computation
    fragPart = pkt [ IPv6ExtHdrFragment ] . payload
    tmp = raw ( IPv6 ( src = "::1" , dst = "::1" ) / fragPart )
    fragPartLen = len ( tmp ) - 40
    # basic IPv6 header length
    fragPartStr = s [ - fragPartLen : ]
    # Grab Next Header for use in Fragment Header
    nh = pkt [ IPv6ExtHdrFragment ] . nh
    # Keep fragment header
    fragHeader = pkt [ IPv6ExtHdrFragment ]
    del fragHeader . payload
    # detach payload
    # Unfragmentable Part
    unfragPartLen = len ( s ) - fragPartLen - 8
    unfragPart = pkt
    del pkt [ IPv6ExtHdrFragment ] . underlayer . payload
    # detach payload
    # Cut the fragmentable part to fit fragSize. Inner fragments have
    # a length that is an integer multiple of 8 octets. last Frag MTU
    # can be anything below MTU
    lastFragSize = fragSize - unfragPartLen - 8
    innerFragSize = lastFragSize - ( lastFragSize % 8 )
    if lastFragSize <= 0 or innerFragSize == 0 :
        # fragSize leaves no room for payload after the unfragmentable part
        # and the 8-byte fragment header; return the packet unfragmented.
        warning ( "Provided fragment size value is too low. " + "Should be more than %d" % ( unfragPartLen + 8 ) )
        return [ unfragPart / fragHeader / fragPart ]
    remain = fragPartStr
    res = [ ]
    fragOffset = 0
    # offset, incremeted during creation
    fragId = random . randint ( 0 , 0xffffffff )
    # random id ...
    if fragHeader . id is not None : # ... except id provided by user
        fragId = fragHeader . id
    fragHeader . m = 1
    fragHeader . id = fragId
    fragHeader . nh = nh
    # Main loop : cut, fit to FRAGSIZEs, fragOffset, Id ...
    while True :
        if ( len ( remain ) > lastFragSize ) :
            # Not the last fragment: take an 8-byte-aligned chunk and keep
            # the "more fragments" flag (m=1) set on the header.
            tmp = remain [ : innerFragSize ]
            remain = remain [ innerFragSize : ]
            fragHeader . offset = fragOffset
            # update offset
            fragOffset += ( innerFragSize // 8 )
            # compute new one
            if IPv6 in unfragPart :
                # Let scapy recompute the payload length field.
                unfragPart [ IPv6 ] . plen = None
            tempo = unfragPart / fragHeader / conf . raw_layer ( load = tmp )
            res . append ( tempo )
        else :
            # Last fragment: clear the "more fragments" flag and emit the
            # remaining payload, then stop.
            fragHeader . offset = fragOffset
            # update offSet
            fragHeader . m = 0
            if IPv6 in unfragPart :
                unfragPart [ IPv6 ] . plen = None
            tempo = unfragPart / fragHeader / conf . raw_layer ( load = remain )
            res . append ( tempo )
            break
    return res
|
def nlargest(self, n, columns, keep='first'):
    """Return the first `n` rows ordered by `columns` in descending order.

    Equivalent to ``df.sort_values(columns, ascending=False).head(n)``,
    but more performant.  Columns not listed in `columns` are returned as
    well, but are not used for ordering.

    Parameters
    ----------
    n : int
        Number of rows to return.
    columns : label or list of labels
        Column label(s) to order by.
    keep : {'first', 'last', 'all'}, default 'first'
        How to resolve duplicate values:

        - ``first`` : prioritize the first occurrence(s)
        - ``last`` : prioritize the last occurrence(s)
        - ``all`` : do not drop any duplicates, even if it means
          selecting more than `n` items.

        .. versionadded:: 0.24.0

    Returns
    -------
    DataFrame
        The first `n` rows ordered by the given columns in descending
        order.

    See Also
    --------
    DataFrame.nsmallest : Return the first `n` rows ordered by `columns`
        in ascending order.
    DataFrame.sort_values : Sort DataFrame by the values.
    DataFrame.head : Return the first `n` rows without re-ordering.

    Notes
    -----
    This function cannot be used with all column types.  For example,
    specifying columns with ``object`` or ``category`` dtypes raises
    ``TypeError``.

    Examples
    --------
    >>> df = pd.DataFrame({'population': [5900000, 6500000, 434000,
    ...                                   434000, 434000, 337000, 11300,
    ...                                   11300, 11300],
    ...                    'GDP': [1937894, 2583560, 12011, 4520, 12128,
    ...                            17036, 182, 38, 311],
    ...                    'alpha-2': ["IT", "FR", "MT", "MV", "BN",
    ...                                "IS", "NR", "TV", "AI"]},
    ...                   index=["Italy", "France", "Malta",
    ...                          "Maldives", "Brunei", "Iceland",
    ...                          "Nauru", "Tuvalu", "Anguilla"])

    Select the three rows with the largest population:

    >>> df.nlargest(3, 'population')
            population      GDP alpha-2
    France     6500000  2583560      FR
    Italy      5900000  1937894      IT
    Malta       434000    12011      MT

    Order by multiple columns:

    >>> df.nlargest(3, ['population', 'GDP'])
            population      GDP alpha-2
    France     6500000  2583560      FR
    Italy      5900000  1937894      IT
    Brunei      434000    12128      BN
    """
    # Delegate to the shared "select top-N" machinery, which avoids a
    # full sort of the frame.
    selector = algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns)
    return selector.nlargest()
|
def main():
    """Command-line driver: parse a Flex regular-expression file into an
    FST, minimize it, print it and optionally save it.

    Usage: prog fst_file [optional: save_file]
    """
    if len(argv) < 2:
        # Use the print() function: the Python-2 ``print`` statement is a
        # SyntaxError under Python 3.
        print('Usage: %s fst_file [optional: save_file]' % argv[0])
        return
    flex_a = Flexparser()
    mma = flex_a.yyparse(argv[1])
    # Minimize in place before displaying/saving.
    mma.minimize()
    print(mma)
    if len(argv) == 3:
        mma.save(argv[2])
|
def reflash_firmware(self, hardware_id, ipmi=True, raid_controller=True, bios=True):
    """Reflash hardware firmware.

    This will cause the server to be unavailable for ~60 minutes.
    The firmware will not be upgraded but rather reflashed to the
    version installed.

    :param int hardware_id: The ID of the hardware to have its firmware
                            reflashed.
    :param bool ipmi: Reflash the ipmi firmware.
    :param bool raid_controller: Reflash the raid controller firmware.
    :param bool bios: Reflash the bios firmware.

    Example::

        # Check the servers active transactions to see progress
        result = mgr.reflash_firmware(hardware_id=1234)
    """
    # Normalize each flag to a plain bool before handing it to the API.
    flags = (bool(ipmi), bool(raid_controller), bool(bios))
    return self.hardware.createFirmwareReflashTransaction(*flags, id=hardware_id)
|
def temporary_dir():
    """Context manager that creates a temporary directory and chdirs to it.

    When the context manager exits it returns to the previous cwd and
    deletes the temporary directory.
    """
    tmp_path = tempfile.mkdtemp()
    try:
        # ``cd`` restores the previous working directory on exit.
        with cd(tmp_path):
            yield tmp_path
    finally:
        # The body may have already removed the directory; only clean up
        # if it still exists.
        if os.path.exists(tmp_path):
            shutil.rmtree(tmp_path)
|
def splits(cls, exts, fields, root='.data', train='train', validation='IWSLT16.TED.tst2013', test='IWSLT16.TED.tst2014', **kwargs):
    """Create dataset objects for splits of the IWSLT dataset.

    Arguments:
        exts: A tuple containing the extension to path for each language
            (e.g. ('.de', '.en')).
        fields: A tuple containing the fields that will be used for data
            in each language.
        root: Root dataset storage directory. Default is '.data'.
        train: The prefix of the train data. Default: 'train'.
        validation: The prefix of the validation data. Default:
            'IWSLT16.TED.tst2013'.
        test: The prefix of the test data. Default: 'IWSLT16.TED.tst2014'.
        Remaining keyword arguments: Passed to the splits method of
            Dataset.

    Returns:
        A tuple with the datasets that could be built, in
        (train, validation, test) order; splits passed as None are
        omitted.
    """
    # NOTE(review): this mutates class-level state (dirname/urls) based on
    # the language pair, so concurrent calls with different `exts` would
    # interfere — presumably single-threaded use is assumed.
    cls.dirname = cls.base_dirname.format(exts[0][1:], exts[1][1:])
    cls.urls = [cls.base_url.format(exts[0][1:], exts[1][1:], cls.dirname)]
    check = os.path.join(root, cls.name, cls.dirname)
    # Download the archive only if `check` does not already exist.
    path = cls.download(root, check=check)
    # Split prefixes are suffixed with the language-pair directory name,
    # e.g. 'train' -> 'train.de-en'.
    train = '.'.join([train, cls.dirname])
    validation = '.'.join([validation, cls.dirname])
    if test is not None:
        test = '.'.join([test, cls.dirname])
    if not os.path.exists(os.path.join(path, train) + exts[0]):
        # Expected extracted files are missing; (re)generate them.
        cls.clean(path)
    train_data = None if train is None else cls(os.path.join(path, train), exts, fields, **kwargs)
    val_data = None if validation is None else cls(os.path.join(path, validation), exts, fields, **kwargs)
    test_data = None if test is None else cls(os.path.join(path, test), exts, fields, **kwargs)
    return tuple(d for d in (train_data, val_data, test_data) if d is not None)
|
def select_renderer(self, request, renderers, format_suffix=None):
    """Given a request and a list of renderers, return a two-tuple of:
    (renderer, media type).

    :param request: the incoming request; its query parameters and
        Accept header drive content negotiation.
    :param renderers: candidate renderer instances, in preference order.
    :param format_suffix: optional format taken from the URL suffix;
        overrides the Accept header when provided.
    :raises exceptions.NotAcceptable: if no renderer can satisfy the
        request's acceptable media types.
    """
    # Allow URL style format override.  eg. "?format=json"
    format_query_param = self.settings.URL_FORMAT_OVERRIDE
    format = format_suffix or request.query_params.get(format_query_param)
    if format:
        # An explicit format restricts the candidate renderers.
        renderers = self.filter_renderers(renderers, format)
    accepts = self.get_accept_list(request)
    # Check the acceptable media types against each renderer,
    # attempting more specific media types first
    # NB. The inner loop here isn't as bad as it first looks :)
    # Worst case is we're looping over len(accept_list) * len(self.renderers)
    for media_type_set in order_by_precedence(accepts):
        for renderer in renderers:
            for media_type in media_type_set:
                if media_type_matches(renderer.media_type, media_type):  # Return the most specific media type as accepted.
                    media_type_wrapper = _MediaType(media_type)
                    if (_MediaType(renderer.media_type).precedence > media_type_wrapper.precedence):  # Eg client requests '*/*'
                        # Accepted media type is 'application/json';
                        # carry over any params from the accept header.
                        full_media_type = ';'.join((renderer.media_type,) + tuple('{0}={1}'.format(key, value.decode(HTTP_HEADER_ENCODING)) for key, value in media_type_wrapper.params.items()))
                        return renderer, full_media_type
                    else:  # Eg client requests 'application/json; indent=8'
                        # Accepted media type is 'application/json; indent=8'
                        return renderer, media_type
    raise exceptions.NotAcceptable(available_renderers=renderers)
|
def mass_2d(self, R, Rs, rho0):
    """Projected mass enclosed within a circle of radius ``R``.

    :param R: projected radius
    :param Rs: scale radius of the profile
    :param rho0: density normalization
    :return: enclosed projected mass
    """
    scaled_radius = R / Rs
    # g_() carries the profile-specific radial dependence.
    profile_term = self.g_(scaled_radius)
    return 4 * np.pi * rho0 * Rs * R ** 2 * profile_term / scaled_radius ** 2
|
def share_model_ndex():
    """Upload the model to NDEX and return its network ID."""
    # CORS preflight request: nothing to do.
    if request.method == 'OPTIONS':
        return {}
    raw_body = request.body.read().decode('utf-8')
    payload = json.loads(raw_body)
    stmts_json = json.loads(payload.get('stmts'))
    assembler = CxAssembler(stmts_from_json(stmts_json["statements"]))
    # Attach every key of the request payload as a string-valued
    # network attribute.
    for attr_name, attr_value in payload.items():
        assembler.cx['networkAttributes'].append({'n': attr_name, 'v': attr_value, 'd': 'string'})
    assembler.make_model()
    return {'network_id': assembler.upload_model(private=False)}
|
def add_related(self, *objects):
    """Add related items.

    The arguments can be individual items or cluster objects containing
    several items.

    When two groups of related items share one or more common members,
    they will be merged into one cluster.
    """
    # The first cluster encountered (or a fresh one) becomes the common
    # cluster of all related items.
    master = None
    # Set of clusters that are going to be merged into the master.
    slaves = set([])
    # Set of new items that are not yet part of any cluster.
    solitaire = set([])
    for new in objects:
        if isinstance(new, self.cls):
            # `new` is itself a cluster object.
            if master is None:
                master = new
            else:
                slaves.add(new)
            # Any existing cluster containing one of its items must be
            # merged as well.
            for item in new.items:
                existing = self.lookup.get(item)
                if existing is not None:
                    slaves.add(existing)
        else:
            # `new` is an individual item.
            cluster = self.lookup.get(new)
            if cluster is None:
                # Not in any cluster yet: add it directly later.
                solitaire.add(new)
            elif master is None:
                # First cluster seen becomes the master.
                master = cluster
            elif master != cluster:
                # Item belongs to a different cluster: merge it in.
                slaves.add(cluster)
            # else: item already in master — nothing to do.
    if master is None:
        # No existing cluster was involved: start a fresh one.
        master = self.cls([])
    for slave in slaves:
        master.update(slave)
    for item in solitaire:
        master.add_item(item)
    # Point every member's lookup entry at the merged cluster.
    for item in master.items:
        self.lookup[item] = master
|
def get_user_metadata(self, bucket: str, key: str) -> typing.Dict[str, str]:
    """Retrieve the user metadata for a given object in a given bucket.

    If the platform has any mandatory prefixes or suffixes for the
    metadata keys, they should be stripped before being returned.

    :param bucket: the bucket the object resides in.
    :param key: the key of the object for which metadata is being
        retrieved.
    :return: a dictionary mapping metadata keys to metadata values.
    """
    # Abstract hook: concrete storage backends must override this.
    raise NotImplementedError()
|
def update(self, query, attributes, upsert=False):
    """Update data in the table.

    :Parameters:
        - query (dict), specify the WHERE clause
        - attributes (dict), specify the SET clause
        - upsert: boolean. If True and no row matches the query, the
          values are inserted instead.
    :Return: Number of rows updated or inserted
    """
    if upsert and not self.find_one(query):
        # No matching row: fall back to an INSERT and report 1 row on
        # success, 0 on failure.
        new_id = self.insert(attributes)
        return 1 if new_id > 0 else 0
    statement = build_update(self.name, query, attributes)
    return self.cursor.execute(statement)
|
def parse_radl(data):
    """Parse a RADL document.

    Args:
    - data(str): filepath to a RADL content or a string with content.

    Return: RADL object, or None if ``data`` is None.
    """
    if data is None:
        return None
    elif os.path.isfile(data):
        # Context manager guarantees the file handle is closed even if
        # reading raises; read() is equivalent to joining readlines().
        with open(data) as f:
            data = f.read()
    elif data.strip() == "":
        return RADL()
    # The lexer expects a trailing newline.
    data = data + "\n"
    parser = RADLParser(lextab='radl')
    return parser.parse(data)
|
def cmd_reload_global(self, plname):
    """reload_global `plname`

    Reload the *global* plugin named `plname`.  You should close
    all instances of the plugin before attempting to reload.

    :param plname: name of the global plugin module to reload.
    :returns: True on completion.
    """
    gpmon = self.fv.gpmon
    p_info = gpmon.get_plugin_info(plname)
    # Stop the running plugin instance before swapping out its module.
    gpmon.stop_plugin(p_info)
    # Let pending GUI/event callbacks drain before re-importing.
    self.fv.update_pending(0.5)
    # Re-import the module so the new code is picked up, then rebuild
    # the plugin from it and start it again.
    self.fv.mm.load_module(plname)
    gpmon.reload_plugin(plname)
    self.fv.start_global_plugin(plname)
    return True
|
def notebook_authenticate(cmd_args, force=False, silent=True):
    """Similiar to authenticate but prints student emails after
    all calls and uses a different way to get codes. If SILENT is True,
    it will suppress the error message and redirect to FORCE=True

    :param cmd_args: parsed command-line arguments, passed through to
        the OAuth helpers.
    :param force: if True, skip the locally cached token and perform a
        fresh OAuth login.
    :param silent: if True, a failed token refresh retries with
        force=True instead of raising the OAuthException.
    :return: an access token string (possibly None if login failed).
    """
    server = server_url(cmd_args)
    network.check_ssl()
    access_token = None
    if not force:
        try:
            # Try the locally cached token first.
            access_token = refresh_local_token(server)
        except OAuthException as e:  # Account for Invalid Grant Error During make_token_post
            if not silent:
                raise e
            # Retry once with a fresh login; silent=False so a second
            # failure propagates to the caller.
            return notebook_authenticate(cmd_args, force=True, silent=False)
    if not access_token:
        access_token = perform_oauth(get_code_via_terminal, cmd_args, copy_msg=NOTEBOOK_COPY_MESSAGE, paste_msg=NOTEBOOK_PASTE_MESSAGE)
    # Always display email
    email = display_student_email(cmd_args, access_token)
    if email is None and not force:
        # Token has expired: retry once with a forced fresh login.
        return notebook_authenticate(cmd_args, force=True)
    elif email is None:  # Did not get a valid token even after a fresh login
        log.warning('Could not get login email. You may have been logged out. ' 'Try logging in again.')
    return access_token
|
def _get_memory_contents(self):
    """Runs the scheduler to determine memory contents at every point in time.

    Returns:
      a list of frozenset of strings, where the ith entry describes the
      tensors in memory when executing operation i (where schedule[i] is an
      index into GetAllOperationNames()).
    """
    # Memoize: scheduling is expensive, so compute the contents at most
    # once and cache the result on the instance.
    if self._memory_contents is None:
        schedule = scheduler.minimize_peak_memory(self._graph, self._scheduler_alg)
        self._memory_contents = self._graph.compute_memory_contents_under_schedule(schedule)
    return self._memory_contents
|
def _get_index_of_monomial(self, element, enablesubstitution=True, daggered=False):
    """Returns the index of a monomial.

    :param element: the (possibly composite) symbolic expression whose
        moment-matrix index/indices are requested.
    :param enablesubstitution: if True, apply the configured substitution
        rules before looking the monomial up.
    :param daggered: internal flag — set when retrying with the adjoint,
        to avoid infinite recursion.
    :returns: a list of (index, coefficient) pairs.
    :raises RuntimeError: if neither the monomial nor its adjoint can be
        found in the index.
    """
    result = []
    # Split off any scalar prefactor first.
    processed_element, coeff1 = separate_scalar_factor(element)
    if processed_element in self.moment_substitutions:
        # Resolve moment substitutions recursively, scaling by coeff1.
        r = self._get_index_of_monomial(self.moment_substitutions[processed_element], enablesubstitution)
        return [(k, coeff * coeff1) for k, coeff in r]
    if enablesubstitution:
        processed_element = apply_substitutions(processed_element, self.substitutions, self.pure_substitution_rules)
    # Given the monomial, we need its mapping L_y(w) to push it into
    # a corresponding constraint matrix
    if is_number_type(processed_element):
        # Pure scalars map to the constant entry (index 0).
        return [(0, coeff1)]
    elif processed_element.is_Add:
        monomials = processed_element.args
    else:
        monomials = [processed_element]
    for monomial in monomials:
        monomial, coeff2 = separate_scalar_factor(monomial)
        coeff = coeff1 * coeff2
        if is_number_type(monomial):
            result.append((0, coeff))
            continue
        k = -1
        if monomial != 0:
            # Normalize the sign so lookups use the canonical form.
            if monomial.as_coeff_Mul()[0] < 0:
                monomial = -monomial
                coeff = -1.0 * coeff
            try:
                new_element = self.moment_substitutions[monomial]
                # NOTE(review): this looks up moment_substitutions twice
                # (monomial -> new_element -> ...) and reuses k == -1 as
                # the scaling index below — presumably intentional for
                # chained substitutions, but worth confirming.
                r = self._get_index_of_monomial(self.moment_substitutions[new_element], enablesubstitution)
                result += [(k, coeff * coeff3) for k, coeff3 in r]
            except KeyError:
                try:
                    k = self.monomial_index[monomial]
                    result.append((k, coeff))
                except KeyError:
                    # Not indexed directly; try the adjoint once.
                    if not daggered:
                        dag_result = self._get_index_of_monomial(monomial.adjoint(), daggered=True)
                        result += [(k, coeff0 * coeff) for k, coeff0 in dag_result]
                    else:
                        raise RuntimeError("The requested monomial " + str(monomial) + " could not be found.")
    return result
|
def _apply_bracket_layers(self):
    """Extract bracket layers in a GSGlyph into free-standing UFO glyphs with
    Designspace substitution rules.

    As of Glyphs.app 2.6, only single axis bracket layers are supported, we
    assume the axis to be the first axis in the Designspace. Bracket layer
    backgrounds are not round-tripped.

    A glyph can have more than one bracket layer but Designspace
    rule/OpenType variation condition sets apply all substitutions in a rule
    in a range, so we have to potentially sort bracket layers into rule
    buckets. Example: if a glyph "x" has two bracket layers [300] and [600]
    and glyph "a" has bracket layer [300] and the bracket axis tops out at
    1000, we need the following Designspace rules:

    - BRACKET.300.600  # min 300, max 600 on the bracket axis.
        - x -> x.BRACKET.300
    - BRACKET.600.1000
        - x -> x.BRACKET.600
    - BRACKET.300.1000
        - a -> a.BRACKET.300

    :raises ValueError: if no axis is defined, if a crossover cannot be
        parsed or is out of the bracket axis bounds, or if a bracket
        layer is not present on all masters.
    """
    if not self._designspace.axes:
        raise ValueError("Cannot apply bracket layers unless at least one axis is defined.")
    # Only the first axis is treated as the bracket axis (see docstring).
    bracket_axis = self._designspace.axes[0]
    # Determine the axis scale in design space because crossovers/locations are
    # in design space (axis.default/minimum/maximum may be user space).
    if bracket_axis.map:
        axis_scale = [design_location for _, design_location in bracket_axis.map]
        bracket_axis_min = min(axis_scale)
        bracket_axis_max = max(axis_scale)
    else:  # No mapping means user and design space are the same.
        bracket_axis_min = bracket_axis.minimum
        bracket_axis_max = bracket_axis.maximum
    # 1. bracket_layer_map: Organize all bracket layers by crossover value, so
    #    we can go through the layers by location and copy them to free-standing
    #    glyphs.
    # 2. glyph_crossovers: Keep track of the crossover values of a single glyph, so
    #    we can easily sort them into rule buckets.
    # 3. glyph_sanity_counter: Count the number of master layers providing
    #    bracket layers per glyph and crossover value. We currently only support
    #    the situation where there is a bracket layer for _all_ masters, what the
    #    Glyphs.app tutorial calls 'Changing All Masters'.
    bracket_layer_map = defaultdict(list)  # type: Dict[int, List[classes.GSLayer]]
    glyph_crossovers = defaultdict(set)  # type: Dict[str, Set[int]]
    glyph_sanity_counter = defaultdict(list)  # type: Dict[Tuple[str, int], List[str]]
    for layer in self.bracket_layers:
        glyph_name = layer.parent.name
        n = layer.name
        try:
            # Parse the numeric crossover out of a layer name like "[300]".
            bracket_crossover = int(n[n.index("[") + 1:n.index("]")])
        except ValueError:
            raise ValueError("Only bracket layers with one numerical (design space) location " "(meaning the first axis in the designspace file) are currently " "supported.")
        if not bracket_axis_min <= bracket_crossover <= bracket_axis_max:
            raise ValueError("Glyph {glyph_name}: Bracket layer {layer_name} must be within the " "design space bounds of the {bracket_axis_name} axis: minimum " "{bracket_axis_minimum}, maximum {bracket_axis_maximum}.".format(glyph_name=glyph_name, layer_name=n, bracket_axis_name=bracket_axis.name, bracket_axis_minimum=bracket_axis_min, bracket_axis_maximum=bracket_axis_max, ))
        bracket_layer_map[bracket_crossover].append(layer)
        glyph_crossovers[glyph_name].add(bracket_crossover)
        glyph_sanity_counter[(glyph_name, bracket_crossover)].append(layer.associatedMasterId)
    # Check that each bracket layer is present in all master layers.
    unbalanced_bracket_layers = []
    n_masters = len(list(self.masters))
    for ((glyph_name, _), master_layer_ids) in glyph_sanity_counter.items():
        if not len(master_layer_ids) == n_masters:
            unbalanced_bracket_layers.append(glyph_name)
    if unbalanced_bracket_layers:
        raise ValueError("Currently, we only support bracket layers that are present on all " "masters, i.e. what the Glyphs.app tutorial calls 'Changing All " "Masters'. There is a/are bracket layer(s) missing " "for glyph(s) {unbalanced_glyphs}.".format(unbalanced_glyphs=unbalanced_bracket_layers))
    # Sort crossovers into buckets: each crossover opens a range that
    # runs until the next crossover (or the axis maximum).
    rule_bucket = defaultdict(list)  # type: Dict[Tuple[int, int], List[int]]
    for glyph_name, crossovers in sorted(glyph_crossovers.items()):
        for crossover_min, crossover_max in util.pairwise(sorted(crossovers) + [bracket_axis_max]):
            rule_bucket[(int(crossover_min), int(crossover_max))].append(glyph_name)
    # Generate rules for the bracket layers.
    for (axis_range_min, axis_range_max), glyph_names in sorted(rule_bucket.items()):
        rule_name = "BRACKET.{}.{}".format(axis_range_min, axis_range_max)
        glyph_sub_suffix = ".BRACKET.{}".format(axis_range_min)
        rule = designspaceLib.RuleDescriptor()
        rule.name = rule_name
        rule.conditionSets.append([{"name": bracket_axis.name, "minimum": axis_range_min, "maximum": axis_range_max, }])
        rule.subs.extend([(glyph_name, glyph_name + glyph_sub_suffix) for glyph_name in glyph_names])
        self._designspace.addRule(rule)
    # Finally, copy bracket layers to their own glyphs.
    for location, layers in bracket_layer_map.items():
        for layer in layers:
            ufo_font = self._sources[layer.associatedMasterId or layer.layerId].font.layers.defaultLayer
            ufo_glyph_name = "{glyph_name}.BRACKET.{location}".format(glyph_name=layer.parent.name, location=location)
            ufo_glyph = ufo_font.newGlyph(ufo_glyph_name)
            self.to_ufo_glyph(ufo_glyph, layer, layer.parent)
            # Strip codepoints so the bracket copy does not collide with
            # the original glyph in the cmap.
            ufo_glyph.unicodes = []  # Avoid cmap interference
            ufo_glyph.lib[GLYPHLIB_PREFIX + "_originalLayerName"] = layer.name
|
def get_previous_version(version: str) -> Optional[str]:
    """Returns the version prior to the given version.

    :param version: A string with the version number.
    :return: A string with the previous version number, or the result of
        ``get_last_version`` if none is found in the commit log.
    """
    debug('get_previous_version')
    found_version = False
    for commit_hash, commit_message in get_commit_log():
        debug('checking commit {}'.format(commit_hash))
        if version in commit_message:
            # Skip commits mentioning the current version; everything
            # after this point is older history.
            found_version = True
            debug('found_version in "{}"'.format(commit_message))
            continue
        if found_version:
            # Escape the dots so we match a literal "X.Y.Z" version; the
            # original r'\d+.\d+.\d+' let '.' match any character
            # (e.g. "1a2b3" would have matched).
            matches = re.match(r'v?(\d+\.\d+\.\d+)', commit_message)
            if matches:
                debug('version matches', commit_message)
                return matches.group(1).strip()
    return get_last_version([version, 'v{}'.format(version)])
|
def build_includes():
    """Creates rst files in the _include directory using the python scripts
    there.

    This will ignore any files in the _include directory that start with
    ``_``.
    """
    print("Running scripts in _include:")
    cwd = os.getcwd()
    os.chdir('_include')
    try:
        for fn in glob.glob('*.py'):
            if not fn.startswith('_'):
                print(' {}'.format(fn))
                subprocess.check_output(['python', fn])
    finally:
        # Restore the original working directory even if a script fails;
        # the original left the process stuck inside _include on error.
        os.chdir(cwd)
|
def get_raw_input(description, default=False):
    """Get user input from the command line via raw_input / input.

    description (unicode): Text to display before prompt.
    default (unicode or False/None): Default value to display with prompt.
    RETURNS (unicode): User input.
    """
    if default:
        suffix = ' (default: %s)' % default
    else:
        suffix = ''
    # Note: the default is only displayed — it is not substituted when
    # the user enters nothing.
    return input_(' %s%s: ' % (description, suffix))
|
def curtailment(self):
    """Get curtailment time series of dispatchable generators (only active
    power).

    Parameters
    ----------
    curtailment : list or :pandas:`pandas.DataFrame<dataframe>`
        See class definition for details.

    Returns
    -------
    :pandas:`pandas.DataFrame<dataframe>` or None
        Curtailment time series restricted to ``self.timeindex``.  In the
        case curtailment is applied to all solar and wind generators,
        columns are either technology types ('solar', 'wind') or a
        :pandas:`pandas.MultiIndex<multiindex>` of (type, weather cell
        ID).  When curtailment was given as a list of generator objects,
        a DataFrame with one column per generator (built from each
        generator's ``curtailment`` attribute) is returned.  Returns
        None when no curtailment is set.
    """
    if self._curtailment is None:
        return None
    if isinstance(self._curtailment, pd.DataFrame):
        try:
            # Index with a list to preserve the DataFrame shape even for
            # a single timestep.
            return self._curtailment.loc[[self.timeindex], :]
        except Exception:
            # Fall back to scalar/slice indexing when the wrapped lookup
            # fails (e.g. timeindex is a slice-like object).  The
            # original code used a bare except here; Exception keeps the
            # same behavior without swallowing KeyboardInterrupt.
            return self._curtailment.loc[self.timeindex, :]
    if isinstance(self._curtailment, list):
        # Assemble one column per curtailed generator object.
        curtailment = pd.DataFrame()
        for gen in self._curtailment:
            curtailment[gen] = gen.curtailment
        return curtailment
    # Unknown container type: mirror the historical implicit-None result.
    return None
|
def plot(self, axis=None, **kargs):
    """Plot the scene as seen by the camera, i.e. draw the particles at
    their apparent coordinates.

    Parameters
    ----------
    axis : matplotlib axes, optional
        Axes to draw on.  If None, the current axes (``plt.gca()``) are
        used.
    **kargs
        Any :class:`~matplotlib.lines.Line2D` keyword properties (e.g.
        ``color``, ``alpha``, ``markersize``, ``linewidth``, ``zorder``),
        passed straight through to ``axis.plot``.
    """
    if axis is None:
        # `is None` instead of `== None`: identity check is the correct
        # idiom and avoids invoking a custom __eq__ on axes objects.
        axis = plt.gca()
    axis.plot(self.__x, self.__y, 'k.', **kargs)
|
def dense(x, output_dim, reduced_dims=None, expert_dims=None, use_bias=True, activation=None, master_dtype=tf.float32, slice_dtype=tf.float32, variable_dtype=None, name=None):
    """Dense layer doing (kernel*x + bias) computation.

    Args:
      x: a mtf.Tensor of shape [..., reduced_dims].
      output_dim: a mtf.Dimension
      reduced_dims: an optional list of mtf.Dimensions of x to be reduced. If
        omitted, we reduce the last dimension.
      expert_dims: an optional list of mtf.Dimension which represent different
        experts. Different experts get different weights.
      use_bias: a boolean, whether to add bias.
      activation: an optional function from mtf.Tensor to mtf.Tensor
      master_dtype: a tf.dtype (deprecated - use variable_dtype)
      slice_dtype: a tf.dtype (deprecated - use variable_dtype)
      variable_dtype: a mtf.VariableDType
      name: a string. variable scope.

    Returns:
      a mtf.Tensor of shape [..., output_dim].
    """
    if variable_dtype is None:
        # Fall back to the deprecated dtype arguments.
        variable_dtype = mtf.VariableDType(master_dtype, slice_dtype, x.dtype)
    if expert_dims is None:
        expert_dims = []
    if reduced_dims is None:
        # Default: contract over the last dimension of x.
        reduced_dims = x.shape.dims[-1:]
    w_shape = mtf.Shape(expert_dims + reduced_dims + [output_dim])
    # Output keeps every non-reduced dim of x, plus the new output_dim.
    output_shape = mtf.Shape([d for d in x.shape.dims if d not in reduced_dims] + [output_dim])
    with tf.variable_scope(name, default_name="dense"):
        # Scale the initializer by 1/sqrt(fan_in) of the reduced dims.
        stddev = mtf.list_product(d.size for d in reduced_dims) ** -0.5
        w = mtf.get_variable(x.mesh, "kernel", w_shape, initializer=tf.random_normal_initializer(stddev=stddev), dtype=variable_dtype)
        w = mtf.cast(w, x.dtype)
        y = mtf.einsum([x, w], output_shape)
        if use_bias:
            b = mtf.get_variable(x.mesh, "bias", mtf.Shape(expert_dims + [output_dim]), initializer=tf.zeros_initializer(), dtype=variable_dtype)
            y += b
        if activation is not None:
            y = activation(y)
        return y
|
def add_atype(self, ):
    """Add the currently selected atype and store it in ``self.atypes``.

    :returns: None
    :rtype: None
    :raises: None
    """
    index = self.atype_tablev.currentIndex()
    selected = index.internalPointer()
    if not selected:
        return
    atype = selected.internal_data()
    # Link the atype to the current project before tracking it locally.
    atype.projects.add(self._project)
    self.atypes.append(atype)
    # Detach the item from the model so it disappears from the view.
    selected.set_parent(None)
|
def generate_signed_url(
    self,
    expiration=None,
    api_access_endpoint=_API_ACCESS_ENDPOINT,
    method="GET",
    content_md5=None,
    content_type=None,
    response_disposition=None,
    response_type=None,
    generation=None,
    headers=None,
    query_parameters=None,
    client=None,
    credentials=None,
    version=None,
):
    """Generate a signed URL granting time-limited access to this blob.

    Useful when blobs should not be publicly accessible but users must
    not be forced to log in explicitly.

    .. note::

        On Google Compute Engine a signed URL cannot be generated with a
        GCE service account; use a standard service account loaded from a
        JSON file instead.  See
        https://github.com/GoogleCloudPlatform/google-auth-library-python/issues/50

    :type expiration: Union[Integer, datetime.datetime, datetime.timedelta]
    :param expiration: Point in time when the signed URL should expire.

    :type api_access_endpoint: str
    :param api_access_endpoint: Optional URI base.

    :type method: str
    :param method: The HTTP verb that will be used when requesting the URL
                   (upper-cased before signing).

    :type content_md5: str
    :param content_md5: (Optional) MD5 hash of the object referenced by
                        ``resource``.

    :type content_type: str
    :param content_type: (Optional) Content type of the object referenced
                         by ``resource``.

    :type response_disposition: str
    :param response_disposition: (Optional) Content disposition of
                                 responses to requests for the signed URL,
                                 e.g. ``'attachment; filename=blob.png'``.

    :type response_type: str
    :param response_type: (Optional) Content type of responses to requests
                          for the signed URL; overrides the blob's own.

    :type generation: str
    :param generation: (Optional) Which generation of the resource to
                       fetch.

    :type headers: dict
    :param headers: (Optional) Additional HTTP headers included in the
                    signature; requests using the URL *must* send them.
                    See
                    https://cloud.google.com/storage/docs/xml-api/reference-headers

    :type query_parameters: dict
    :param query_parameters: (Optional) Additional query parameters
                             included in the signed URL.  See
                             https://cloud.google.com/storage/docs/xml-api/reference-headers#query

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: (Optional) Client to use; falls back to the client
                   stored on the blob's bucket.

    :type credentials: :class:`oauth2client.client.OAuth2Credentials` or
                       :class:`NoneType`
    :param credentials: (Optional) OAuth2 credentials used to sign the
                        URL; defaults to the client's credentials.

    :type version: str
    :param version: (Optional) Signing scheme, ``'v2'`` (default) or
                    ``'v4'``.

    :raises: :exc:`ValueError` when version is invalid.
    :raises: :exc:`TypeError` when expiration is not a valid type.
    :raises: :exc:`AttributeError` if credentials is not an instance
             of :class:`google.auth.credentials.Signing`.

    :rtype: str
    :returns: A signed URL usable to access the resource until expiration.
    """
    if version is None:
        version = "v2"
    if version not in ("v2", "v4"):
        raise ValueError("'version' must be either 'v2' or 'v4'")

    # Canonical resource path: bucket name plus the URL-quoted blob name.
    quoted_name = quote(self.name.encode("utf-8"))
    resource = "/{bucket_name}/{quoted_name}".format(
        bucket_name=self.bucket.name, quoted_name=quoted_name
    )

    # Default to the credentials of the (possibly implicit) client.
    if credentials is None:
        client = self._require_client(client)
        credentials = client._credentials

    helper = generate_signed_url_v2 if version == "v2" else generate_signed_url_v4
    return helper(
        credentials,
        resource=resource,
        expiration=expiration,
        api_access_endpoint=api_access_endpoint,
        method=method.upper(),
        content_md5=content_md5,
        content_type=content_type,
        response_type=response_type,
        response_disposition=response_disposition,
        generation=generation,
        headers=headers,
        query_parameters=query_parameters,
    )
|
def groupby_apply(df, cols, func, *args, **kwargs):
    """Group *df* by *cols* and call *func* on each group, concatenating results.

    Unlike ``df.groupby(cols).apply(func, *args)``, this never calls *func*
    twice on the first group (pandas does so to infer the output shape),
    which matters when *func* is expensive or has side effects.

    Parameters
    ----------
    df : pandas.DataFrame
        Frame to split into groups.
    cols : str or list of str
        Column label(s) to group by.
    func : callable
        Called as ``func(group, *args, **kwargs)`` for each group.  Each
        group is an independent frame (not a view of *df*), so *func* may
        mutate it without triggering ``SettingWithCopyWarning``.
    *args : tuple
        Positional arguments forwarded to *func*.
    **kwargs : dict
        Keyword arguments forwarded to *func*.  The keyword ``axis``
        (default 0) is consumed here and passed to ``pd.concat`` instead.

    Returns
    -------
    pandas.DataFrame
        Concatenation of the per-group results with a fresh index.
    """
    # Pop the concat axis up front; the remaining kwargs go to func.
    # (dict.pop with a default replaces the previous try/except KeyError.)
    axis = kwargs.pop('axis', 0)
    pieces = []
    for _, group in df.groupby(cols):
        pieces.append(func(group, *args, **kwargs))
    return pd.concat(pieces, axis=axis, ignore_index=True)
|
def visit_VariableDeclaration(self, node):
    """Visitor for `VariableDeclaration` AST node.

    Registers the declared name in the symbol table, then visits both
    sides of the declaration's assignment.
    """
    left = node.assignment.left
    name = left.identifier.name
    symbol = VariableSymbol(name, left.is_mutable)
    # Declaring a name that already exists is a semantic error.
    if self.table[name] is not None:
        raise SementicError(f"Variable `{name}` is already declared.")
    self.table[symbol.name] = symbol
    self.visit(left)
    self.visit(node.assignment.right)
|
def compile_results(self):
    """Compile all results for the current test.

    Builds the result dataframes, records the total transaction count,
    and initializes the date bounds for the run.
    """
    # Must run first: presumably populates self.main_results used
    # below — TODO confirm against _init_dataframes.
    self._init_dataframes()
    # One transaction per entry in the raw results.
    self.total_transactions = len(self.main_results['raw'])
    self._init_dates()
|
def to_dqflag(self, name=None, minlen=1, dtype=None, round=False,
              label=None, description=None):
    """Convert this series into a `~gwpy.segments.DataQualityFlag`.

    Each contiguous run of `True` values becomes a
    `~gwpy.segments.Segment` spanning from the GPS time of the first
    `True` to the GPS time of the next `False` (or the end of the
    series).

    Parameters
    ----------
    minlen : `int`, optional
        minimum number of consecutive `True` values required to form a
        `~gwpy.segments.Segment`; useful to ignore single bit flips.

    dtype : `type`, `callable`
        output segment entry type; either a type for simple casting or
        a callable mapping a float to another numeric type.  Defaults
        to the `dtype` of the time index.

    round : `bool`, optional
        round each `~gwpy.segments.Segment` out to its inclusive
        integer boundaries.

    label : `str`, optional
        the :attr:`~gwpy.segments.DataQualityFlag.label` for the output
        flag.

    description : `str`, optional
        the :attr:`~gwpy.segments.DataQualityFlag.description` for the
        output flag.

    Returns
    -------
    dqflag : `~gwpy.segments.DataQualityFlag`
        a segment representation of this `StateTimeSeries`: the span of
        the series defines the `known` segments, and each contiguous
        `True` run defines an `active` segment.
    """
    from ..segments import DataQualityFlag

    # Resolve the segment-entry type: default to the time-index dtype,
    # unwrapping numpy dtypes into their callable scalar type.
    if dtype is None:
        dtype = self.t0.dtype
    if isinstance(dtype, numpy.dtype):
        dtype = dtype.type

    t0 = dtype(self.t0.value)
    step = dtype(self.dt.value)

    # Contiguous runs of True become 'active' segments; the full span
    # of the series is what is 'known'.  Simple tuples suffice because
    # DataQualityFlag performs the conversion itself.
    active = _bool_segments(self.value, t0, step, minlen=int(minlen))
    known = [tuple(map(dtype, self.span))]

    flag = DataQualityFlag(name=name or self.name, active=active,
                           known=known, label=label or self.name,
                           description=description)
    return flag.round() if round else flag
|
def predict(self, X, cut_point=0.5):
    """Predict the class label for each sample.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Samples to classify.
    cut_point : float, optional (default=0.5)
        Probability threshold at or above which a sample is assigned to
        the positive class.

    Returns
    -------
    T : array-like, shape = [n_samples]
        Predicted class (0.0 or 1.0) for each sample.
    """
    # p >= cut_point  <=>  floor(p + 1 - cut_point) == 1
    positive_proba = self.predict_proba(X)[:, 1]
    return np.floor(positive_proba + (1 - cut_point))
|
def _get_subclass_names(self, classname, namespace, deep_inheritance):
    """Get names of subclasses of `classname` from the repository.

    If `deep_inheritance` is `False`, return only the classes in the
    repository (for the given namespace) whose direct superclass is
    `classname` — i.e. the next level of the hierarchy.  If `True`,
    return all direct and indirect subclasses.

    Returns:
        list of strings with the names of all subclasses of `classname`.
    """
    assert classname is None or isinstance(
        classname, (six.string_types, CIMClassName))
    if isinstance(classname, CIMClassName):
        classname = classname.classname

    # Classes known for this namespace; empty when the namespace has none.
    try:
        repo_classes = self.classes[namespace]
    except KeyError:
        repo_classes = NocaseDict()

    if classname is None:
        # Roots of the hierarchy: classes without a superclass.
        result = [klass.classname
                  for klass in six.itervalues(repo_classes)
                  if klass.superclass is None]
    else:
        target = classname.lower()
        result = [klass.classname
                  for klass in six.itervalues(repo_classes)
                  if klass.superclass and klass.superclass.lower() == target]

    # Recurse through each direct subclass for the deeper levels.
    if deep_inheritance:
        indirect = []
        for child in result:
            indirect.extend(
                self._get_subclass_names(child, namespace, deep_inheritance))
        result.extend(indirect)
    return result
|
def start_server(data_stream, port=5557, hwm=10):
    """Start a data processing server.

    Runs forever in the current process, pushing batches retrieved from
    `data_stream` over a ZeroMQ PUSH socket.  At the end of each epoch a
    stop marker is sent and a fresh epoch iterator is started.

    Parameters
    ----------
    data_stream : :class:`.DataStream`
        The data stream to return examples from.
    port : int, optional
        The port the server and the client (training loop) will use to
        communicate.  Defaults to 5557.
    hwm : int, optional
        The `ZeroMQ high-water mark (HWM)
        <http://zguide.zeromq.org/page:all#High-Water-Marks>`_ on the
        sending socket.  Increasing this increases the buffer, which can
        be useful if preprocessing times are very random, at the cost of
        memory.  Be sure to set the corresponding HWM on the receiving
        end as well.  Defaults to 10.
    """
    logging.basicConfig(level='INFO')

    context = zmq.Context()
    sender = context.socket(zmq.PUSH)
    sender.set_hwm(hwm)
    sender.bind('tcp://*:{}'.format(port))

    iterator = data_stream.get_epoch_iterator()
    logger.info('server started')

    # Push batches forever; on epoch end, signal the client with a stop
    # marker and immediately begin the next epoch.
    while True:
        try:
            batch = next(iterator)
        except StopIteration:
            iterator = data_stream.get_epoch_iterator()
            logger.debug("sending StopIteration")
            send_arrays(sender, None, stop=True)
        else:
            logger.debug("sending {} arrays".format(len(batch)))
            send_arrays(sender, batch, stop=False)
|
def error_page(participant=None, error_text=None, compensate=True,
               error_type="default", request_data=""):
    """Render HTML for the error page.

    Identifiers (HIT, assignment, worker, participant) are taken from
    *participant* when given, otherwise from the POSTed form data.

    Parameters
    ----------
    participant : optional
        Participant record providing ``hit_id``, ``assignment_id``,
        ``worker_id`` and ``id``; may be ``None``.
    error_text : str, optional
        Message shown to the user; a generic apology is used if omitted.
    compensate : bool
        Whether the template should offer compensation information.
    error_type : str
        Template-level category of the error.
    request_data : str
        Raw request payload echoed into the page for debugging.

    Returns
    -------
    A Flask response carrying the rendered ``error.html`` with HTTP
    status 500.
    """
    config = _config()
    if error_text is None:
        error_text = """There has been an error and so you are unable to
            continue, sorry!"""

    if participant is not None:
        # BUG FIX: these were previously assigned as 1-tuples
        # (`participant.hit_id,`), rendering as "('...',)" in the
        # template and diverging from the plain strings produced by
        # the else-branch below.  Assign the plain values instead.
        hit_id = participant.hit_id
        assignment_id = participant.assignment_id
        worker_id = participant.worker_id
        participant_id = participant.id
    else:
        hit_id = request.form.get("hit_id", "")
        assignment_id = request.form.get("assignment_id", "")
        worker_id = request.form.get("worker_id", "")
        participant_id = request.form.get("participant_id", None)
        if participant_id:
            # Form values arrive as strings; normalize to int or None.
            try:
                participant_id = int(participant_id)
            except (ValueError, TypeError):
                participant_id = None

    return make_response(
        render_template(
            "error.html",
            error_text=error_text,
            compensate=compensate,
            contact_address=config.get("contact_email_on_error"),
            error_type=error_type,
            hit_id=hit_id,
            assignment_id=assignment_id,
            worker_id=worker_id,
            request_data=request_data,
            participant_id=participant_id,
        ),
        500,
    )
|
def _suicide_when_without_parent(self, parent_pid):
    '''Kill this process when the parent died.

    Polls the parent PID every 5 seconds; once it is gone, stops this
    worker and force-exits the process.
    '''
    while True:
        time.sleep(5)
        try:
            # Signal 0 performs no action: it only checks the pid exists.
            os.kill(parent_pid, 0)
        except OSError:
            # Parent is gone: shut down and force-exit.  Plain sys.exit
            # would raise SystemExit instead of terminating outright.
            self.stop()
            log.warning('The parent is not alive, exiting.')
            os._exit(999)
|
def cov(self, other=None, pairwise=None, bias=False, **kwargs):
    """Exponential weighted sample covariance.

    Parameters
    ----------
    other : optional
        Second object to compute covariance against; defaults to the
        object this window was created from (self-covariance).
    pairwise : bool, optional
        Compute covariance between all pairs; defaulted to True only
        when `other` is omitted.
    bias : bool, default False
        Passed through to the EWM covariance kernel.
    **kwargs
        Accepted for signature compatibility; not used here.
    """
    if other is None:
        other = self._selected_obj
        # only default unset
        pairwise = True if pairwise is None else pairwise
    other = self._shallow_copy(other)

    def _get_cov(X, Y):
        # Wrap both operands so _prep_values/_wrap_result are available,
        # then delegate to the Cython EWM covariance kernel.
        X = self._shallow_copy(X)
        Y = self._shallow_copy(Y)
        cov = libwindow.ewmcov(X._prep_values(), Y._prep_values(),
                               self.com, int(self.adjust),
                               int(self.ignore_na), int(self.min_periods),
                               int(bias))
        return X._wrap_result(cov)

    # _flex_binary_moment handles broadcasting/alignment of the operands.
    return _flex_binary_moment(self._selected_obj, other._selected_obj,
                               _get_cov, pairwise=bool(pairwise))
|
def _imagpart(self, f):
    """Function returning the imaginary part of the result from ``f``."""
    # A real output dtype has no imaginary part: the zero element.
    if is_real_dtype(self.out_dtype):
        return self.zero()

    def imag_of_f(x, **kwargs):
        values = np.asarray(f(x, **kwargs), dtype=self.scalar_out_dtype)
        return values.imag

    return self.real_space.element(imag_of_f)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.