signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def run(app: web.Application):
    """Run the application in an async context.

    Blocks indefinitely until the application is shut down.

    Args:
        app (web.Application): The aiohttp Application as created by
            ``create_app()``.
    """
    cfg = app['config']
    # web.run_app() sets up and drives the event loop itself.
    web.run_app(app, host=cfg['host'], port=cfg['port'])
|
def mavlink_packet(self, m):
    '''handle an incoming mavlink packet'''
    # Nothing to draw on without a map.
    if not self.mpstate.map:
        return
    if m.get_type() != 'GIMBAL_REPORT':
        return
    # Both vehicle attitude and global position must be known.
    for required in ('ATTITUDE', 'GLOBAL_POSITION_INT'):
        if required not in self.master.messages:
            return
    # clear the camera icon
    self.mpstate.map.add_object(mp_slipmap.SlipClearLayer('GimbalView'))
    position = self.master.messages['GLOBAL_POSITION_INT']
    attitude = self.master.messages['ATTITUDE']

    # Vehicle body rotation composed with the gimbal joint rotation
    # (312 Euler order for the joints).
    body_dcm = Matrix3()
    body_dcm.from_euler(attitude.roll, attitude.pitch, attitude.yaw)
    joint_dcm = Matrix3()
    joint_dcm.from_euler312(m.joint_roll, m.joint_el, m.joint_az)
    camera_dcm = body_dcm * joint_dcm

    lat = position.lat * 1.0e-7
    lon = position.lon * 1.0e-7
    alt = position.relative_alt * 1.0e-3

    # Right-handed coordinate system with +z down, so the camera in the
    # air sits at -alt.
    camera_point = Vector3(0, 0, -alt)
    # Boresight of the unrotated camera, rotated into the current view vector.
    view_vector = camera_dcm * Vector3(1, 0, 0)
    # Intersect the camera-to-ground line with the ground plane.
    hit = Line(camera_point, view_vector).plane_intersection(Plane(), forward_only=True)
    if hit is None:
        # Camera is pointing up into the sky.
        return None
    (view_lat, view_lon) = mp_util.gps_offset(lat, lon, hit.y, hit.x)
    icon = self.mpstate.map.icon('camera-small-red.png')
    self.mpstate.map.add_object(mp_slipmap.SlipIcon('gimbalview', (view_lat, view_lon), icon, layer='GimbalView', rotation=0, follow=False))
|
def gamma_centered(cls, kpts=(1, 1, 1), use_symmetries=True, use_time_reversal=True):
    """Convenient static constructor for an automatic Gamma-centered k-point grid.

    Args:
        kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors.
        use_symmetries: False if spatial symmetries should not be used
            to reduce the number of independent k-points.
        use_time_reversal: False if time-reversal symmetry should not be used
            to reduce the number of independent k-points.

    Returns:
        :class:`KSampling` object.
    """
    return cls(
        kpts=[kpts],
        kpt_shifts=(0.0, 0.0, 0.0),
        use_symmetries=use_symmetries,
        use_time_reversal=use_time_reversal,
        comment="gamma-centered mode",
    )
|
def create_with_validation(cls, *args, **kwargs):
    """Factory method that creates and validates the model object before it is saved."""
    instance = cls(*args, **kwargs)
    # Validate first so invalid objects never reach the database.
    instance.full_clean()
    instance.save()
    return instance
|
def form_valid(self, post_form, attachment_formset, **kwargs):
    """Processes valid forms.

    Called if all forms are valid. Creates a Post instance along with
    associated attachments if required and then redirects to a success page.
    """
    if self.preview:
        # Previews are only rendered back to the user; nothing is saved.
        context = self.get_context_data(
            preview=True,
            post_form=post_form,
            attachment_formset=attachment_formset,
            **kwargs)
        return self.render_to_response(context)
    # This is not a preview; the object is going to be saved
    self.forum_post = post_form.save()
    if attachment_formset is not None:
        attachment_formset.post = self.forum_post
        attachment_formset.save()
    messages.success(self.request, self.success_message)
    if not self.forum_post.approved:
        messages.warning(self.request, self.approval_required_message)
    return HttpResponseRedirect(self.get_success_url())
|
def _get_analysis_type ( analysis_types : List [ str ] ) -> str :
"""Determine the overall analysis type ."""
|
types_set = set ( analysis_types )
return types_set . pop ( ) if len ( types_set ) == 1 else 'wgs'
|
def re_run_last_cell(self):
    """Run the previous cell again."""
    editor = self.get_current_editor()
    code, line = editor.get_last_cell_as_executable_code()
    self._run_cell_text(code, line)
|
async def post(self, path, data=None, send_raw=False, **params):
    '''sends post request

    Parameters
        path : str
            same as get_url
        data : dict, optional
            payload; JSON-encoded unless ``send_raw`` is set.
            Defaults to an empty dict.
        send_raw : bool
            if True, ``data`` is sent as-is instead of JSON-encoded
        params : kwargs dict
            additional info to pass to get_url

    See Also
        get_url :

    Returns
        requests.models.Response
            the response that was given, or None if every attempt was
            rejected by _process_resp
    '''
    # Fixed: ``data`` used to default to a shared mutable ``{}``;
    # create a fresh dict per call instead.
    if data is None:
        data = {}
    url = self.get_url(path, **params)
    jstr = json.dumps(data)
    for attempt in range(self.tries + 1):
        try:
            body = data if send_raw else jstr
            resp = await self.session.post(url, data=body, timeout=self.timeout)
            if await self._process_resp(resp):
                return resp
            # Response rejected -- retry until attempts are exhausted.
        except aiohttp.ClientConnectionError:
            if attempt >= self.tries:
                raise aiohttp.ClientConnectionError('Emby server is probably down')
|
def zoomlevel(self):
    """Retrieves zoomlevel from the output response.

    Returns:
        zoomlevel (namedtuple): A namedtuple of zoomlevel from the output
        response (a list of namedtuples when the response is a list).
        Falls through returning None when no known shape matches.
    """
    resources = self.get_resource()
    zoomlevel = namedtuple('zoomlevel', 'zoomLevel')
    try:
        # Typical case: ``resources`` is a list of per-resource dicts.
        return [zoomlevel(resource['zoomLevel']) for resource in resources]
    except TypeError:
        # ``resources`` is a single mapping -- try known response shapes.
        try:
            if isinstance(resources['ElevationData'], dict):
                return zoomlevel(resources['ElevationData']['ZoomLevel'])
        except KeyError:
            try:
                if isinstance(resources['SeaLevelData'], dict):
                    zoom = resources['SeaLevelData']['ZoomLevel']
                    return zoomlevel(zoom)
            except KeyError:
                # NOTE(review): this prints the KeyError *class*, not the
                # caught exception -- presumably meant to report the missing
                # key; confirm before changing.
                print(KeyError)
|
def get_response(self):
    """Generate the response block of this request.

    Careful: it only sets the fields which can be set from the request.
    """
    response = IODWriteMultipleRes()
    # Mirror every field the response shares with this request.
    for name in ("seqNum", "ARUUID", "API", "slotNumber", "subslotNumber", "index"):
        response.setfieldval(name, self.getfieldval(name))
    # Each embedded block contributes its own response block.
    response.setfieldval(
        "blocks",
        [block.get_response() for block in self.getfieldval("blocks")],
    )
    return response
|
def format_filename(series_name, season_number, episode_numbers, episode_names, extension):
    """Generates a filename based on metadata using configured format.

    :param str series_name: name of TV series
    :param int season_number: the numeric season of series
    :param list(int) episode_numbers: the numeric episode of the series
    :param list(str) episode_names: episode title of the series
    :param str extension: media file extension
    :returns: formatted filename using input values and configured format
    :rtype: str
    """
    # Apply configured series-name replacements before title-casing.
    series = _replace_series_name(series_name, cfg.CONF.output_series_replacements) or ''
    epdata = {
        'seriesname': tc.titlecase(series),
        'seasonnumber': season_number,
        'episode': _format_episode_numbers(episode_numbers),
        'episodename': tc.titlecase(_format_episode_name(episode_names)),
        'ext': extension,
    }
    formatted = cfg.CONF.filename_format_ep % epdata
    value = apply_replacements(formatted, cfg.CONF.output_filename_replacements)
    return _make_valid_filename(value)
|
def make_carrier_tone(freq, db, dur, samplerate, caldb=100, calv=0.1):
    """Produce a pure tone signal.

    :param freq: Frequency of the tone to be produced (Hz)
    :type freq: int
    :param db: Intensity of the tone in dB SPL
    :type db: int
    :param dur: duration (seconds)
    :type dur: float
    :param samplerate: generation frequency of tone (Hz)
    :type samplerate: int
    :param caldb: Reference intensity (dB SPL). Together with calv, provides
        a reference point for what intensity equals what output voltage level
    :type caldb: int
    :param calv: Reference voltage (V). Together with caldb, provides a
        reference point for what intensity equals what output voltage level
    :type calv: float
    :returns: tone, timevals -- the signal and the time index values
    """
    if samplerate <= 0:
        raise ValueError("Samplerate must be greater than 0")
    if caldb <= 0:
        raise ValueError("Calibration dB SPL must be greater than 0")
    npts = int(dur * samplerate)
    # Convert requested dB SPL into an output amplitude relative to the
    # calibration point (caldb dB SPL <-> calv volts).
    amp = 10 ** ((db - caldb) / 20) * calv
    if USE_RMS:
        # sqrt(2): peak amplitude for a sine of the given RMS level.
        amp *= 1.414213562373
    if VERBOSE:
        print("current dB: {}, fs: {}, current frequency: {} kHz, AO Amp: {:.6f}".format(db, samplerate, freq / 1000, amp))
        print("cal dB: {}, V at cal dB: {}".format(caldb, calv))
    timevals = np.arange(npts) / samplerate
    tone = amp * np.sin((freq * dur) * np.linspace(0, 2 * np.pi, npts))
    return tone, timevals
|
def add_var_arg(self, arg_index):
    """Add a command to the submit file to allow variable (macro) arguments
    to be passed to the executable.

    :param arg_index: index of the variable argument; a new index must be
        the next sequential one, otherwise CondorDAGJobError is raised.
    """
    try:
        # Fast path: this index has already been registered.
        self.__var_args[arg_index]
    except IndexError:
        # New index: it must be the next sequential one.
        if arg_index != self.__arg_index:
            # Fixed: was the Python-2-only ``raise X, "msg"`` form, which is
            # a SyntaxError on Python 3; the call form works on both.
            raise CondorDAGJobError("mismatch between job and node var_arg index")
        self.__var_args.append('$(macroargument%s)' % str(arg_index))
        self.add_arg(self.__var_args[self.__arg_index])
        self.__arg_index += 1
|
def merge_deltas(stmts_in):
    """Gather and merge original Influence delta information from evidence.

    This function is only applicable to Influence Statements that have
    subj and obj deltas. All other statement types are passed through
    unchanged. Polarities and adjectives for subjects and objects
    respectively are collected and merged by traversing all evidences
    of a Statement.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of INDRA Statements whose influence deltas should be merged.
        These Statements are meant to have been preassembled and potentially
        have multiple pieces of evidence.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        The list of Statements now with deltas merged at the Statement
        level.
    """
    stmts_out = []
    for stmt in stmts_in:  # This operation is only applicable to Influences
        if not isinstance(stmt, Influence):
            stmts_out.append(stmt)
            continue
        # At this point this is guaranteed to be an Influence.
        # Collect one entry per evidence for each (role, info) pair,
        # using None where the evidence carries no such annotation.
        deltas = {}
        for role in ('subj', 'obj'):
            for info in ('polarity', 'adjectives'):
                key = (role, info)
                deltas[key] = []
                for ev in stmt.evidence:
                    entry = ev.annotations.get('%s_%s' % key)
                    deltas[key].append(entry if entry else None)
        # POLARITY
        # For polarity we need to work in pairs
        polarity_pairs = list(zip(deltas[('subj', 'polarity')], deltas[('obj', 'polarity')]))
        # If we have some fully defined pairs, we take the most common one
        both_pols = [pair for pair in polarity_pairs if pair[0] is not None and pair[1] is not None]
        if both_pols:
            subj_pol, obj_pol = max(set(both_pols), key=both_pols.count)
            stmt.subj.delta['polarity'] = subj_pol
            stmt.obj.delta['polarity'] = obj_pol
        # Otherwise we prefer the case when at least one entry of the
        # pair is given
        else:
            one_pol = [pair for pair in polarity_pairs if pair[0] is not None or pair[1] is not None]
            if one_pol:
                subj_pol, obj_pol = max(set(one_pol), key=one_pol.count)
                stmt.subj.delta['polarity'] = subj_pol
                stmt.obj.delta['polarity'] = obj_pol
        # ADJECTIVES
        # Adjectives are merged by flat concatenation across all evidences;
        # single (non-list) annotations are appended as-is.
        for attr, role in ((stmt.subj.delta, 'subj'), (stmt.obj.delta, 'obj')):
            all_adjectives = []
            for adj in deltas[(role, 'adjectives')]:
                if isinstance(adj, list):
                    all_adjectives += adj
                elif adj is not None:
                    all_adjectives.append(adj)
            attr['adjectives'] = all_adjectives
        stmts_out.append(stmt)
    return stmts_out
|
def replaceData(self, offset: int, count: int, string: str) -> None:
    """Replace ``count`` units of data starting at ``offset`` with ``string``."""
    # Delegate to the internal implementation.
    self._replace_data(offset, count, string)
|
def gridOn(self):  # noqa: N802
    """Control whether the gridline is drawn for this tick."""
    if not self._gridOn:
        # Short-circuit: return the falsy flag itself, as the original
        # ``and`` expression would.
        return self._gridOn
    # Draw only when the tick is at its default location or its location
    # lies within the current view interval.
    visible = self._has_default_loc() or transforms.interval_contains(
        self.get_view_interval(), self.get_loc())
    return self._gridOn and visible
|
def as_bulk_queries(queries, bulk_size):
    """Group an iterable of (stmt, args) by stmt into (stmt, bulk_args).

    bulk_args will be a list of the args grouped by stmt.
    len(bulk_args) will be <= bulk_size.
    """
    pending = defaultdict(list)
    for stmt, args in queries:
        pending[stmt].append(args)
        # Flush a statement's batch as soon as it reaches bulk_size.
        if len(pending[stmt]) == bulk_size:
            yield stmt, pending.pop(stmt)
    # Flush whatever partial batches remain.
    yield from pending.items()
|
def set_fitness(self, v):
    """Set the fitness to a new node.

    Returns False in case fitness is not finite.
    """
    base = self._base
    # Training fitness must be finite for the node to be usable.
    self.fitness(v)
    if not np.isfinite(v.fitness):
        self.del_error(v)
        return False
    # With a validation split (< 1 training fraction), the validation
    # fitness must be finite as well.
    if base._tr_fraction < 1:
        self.fitness_vs(v)
        if not np.isfinite(v.fitness_vs):
            self.del_error(v)
            return False
    self.del_error(v)
    return True
|
def head(self, *args, **kwargs):
    """Executes an HTTP HEAD.

    :Parameters:
        - `args`: Non-keyword arguments
        - `kwargs`: Keyword arguments
    """
    # Let get_kwargs merge in any session-level defaults first.
    merged = self.get_kwargs(**kwargs)
    return self.session.head(*args, **merged)
|
def collect_from_bundle(self, bundle: Bundle) -> Dict[str, Any]:
    """Collect objects where :meth:`type_check` returns ``True`` from bundles.

    Bundle subclasses can override objects discovered in superclass bundles.
    """
    collected = {}
    if self.discover_from_bundle_superclasses:
        hierarchy = bundle._iter_class_hierarchy()
    else:
        hierarchy = [bundle]
    for bundle_cls in hierarchy:
        module = self.import_bundle_module(bundle_cls)
        if module:
            # Later updates override earlier ones in the dict.
            collected.update(self._collect_from_package(module))
    return collected
|
def _find_attrNodeNS ( E , namespaceURI , localName ) :
'''Must grab the attribute Node to distinquish between
an unspecified attribute ( None ) and one set to empty string ( " " ) .
namespaceURI
localName'''
|
attr = E . getAttributeNodeNS ( namespaceURI , localName )
if attr is None :
return None
try :
return attr . value
except :
pass
return E . getAttributeNS ( namespaceURI , localName )
|
def _find_attrNS ( E , namespaceURI , localName ) :
'''namespaceURI
localName'''
|
try :
v = E . getAttributeNS ( namespaceURI , localName )
if v :
return v
except :
pass
return None
|
def t_NUMBER ( self , t ) :
r'( \ d + ( \ . \ d * ) ? | \ . \ d + ) ( [ eE ] [ + - ] ? \ d + ) ? ( kb | gb | mb | tb | pb | Kb | Gb | Mb | Tb | Pb ) ?'
|
if re . match ( r'^(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?(kb|gb|mb|tb|pb|Kb|Gb|Mb|Tb|Pb)?$' , t . value ) :
multiplyer = 1
try :
suffix = ( t . value [ - 2 : ] ) . lower ( )
if suffix in [ 'kb' ] :
multiplyer = 1024
elif suffix in [ 'mb' ] :
multiplyer = 1024 * 1024
elif suffix in [ 'gb' ] :
multiplyer = 1024 * 1024 * 1024
elif suffix in [ 'tb' ] :
multiplyer = 1024 * 1024 * 1024 * 1024
elif suffix in [ 'pb' ] :
multiplyer = 1024 * 1024 * 1024 * 1024 * 1024
if multiplyer > 1 :
t . value = t . value [ : - 2 ]
except :
pass
try :
f = float ( t . value )
try :
e = int ( t . value )
except ValueError :
e = f
if ( e == f ) :
t . value = multiplyer * e
else :
t . value = multiplyer * f
except ValueError :
_LOGGER . error ( "el valor %s no es un numero valido" % t . value )
t . value = 0
else :
t . type = 'STRING'
return t
|
def cklpf(filename):
    """Load a CK pointing file for use by the CK readers. Return that
    file's handle, to be used by other CK routines to refer to the file.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/cklpf_c.html

    :param filename: Name of the CK file to be loaded.
    :type filename: str
    :return: Loaded file's handle.
    :rtype: int
    """
    # Marshal the Python string into a C char pointer for CSPICE.
    name_p = stypes.stringToCharP(filename)
    handle = ctypes.c_int()
    libspice.cklpf_c(name_p, ctypes.byref(handle))
    return handle.value
|
def generate_sample_sls_module(env_root, module_dir=None):
    """Generate skeleton Serverless sample module."""
    if module_dir is None:
        module_dir = os.path.join(env_root, 'sampleapp.sls')
    generate_sample_module(module_dir)
    # Copy each Serverless template file into the new module directory.
    templates = ('config-dev-us-east-1.json', 'handler.py', 'package.json', 'serverless.yml')
    for template in templates:
        shutil.copyfile(
            os.path.join(ROOT, 'templates', 'serverless', template),
            os.path.join(module_dir, template),
        )
    LOGGER.info("Sample Serverless module created at %s", module_dir)
|
def start_log_monitor(self):
    """Start the log monitor."""
    stdout_file, stderr_file = self.new_log_files("log_monitor")
    process_info = ray.services.start_log_monitor(
        self.redis_address,
        self._logs_dir,
        stdout_file=stdout_file,
        stderr_file=stderr_file,
        redis_password=self._ray_params.redis_password)
    # Only one log monitor may ever be registered for this node.
    assert ray_constants.PROCESS_TYPE_LOG_MONITOR not in self.all_processes
    self.all_processes[ray_constants.PROCESS_TYPE_LOG_MONITOR] = [process_info]
|
def get_relationships(self):
    """Gets the relationship list resulting from a search.

    return: (osid.relationship.RelationshipList) - the relationship list
    raise:  IllegalState - list already retrieved
    *compliance: mandatory -- This method must be implemented.*
    """
    # The result list may only be consumed once.
    if self.retrieved:
        raise errors.IllegalState('List has already been retrieved.')
    self.retrieved = True
    return objects.RelationshipList(self._results, runtime=self._runtime)
|
def tabulate_body(obj, level_keys, v_level_indexes, h_level_indexes, v_level_sort_keys=None, h_level_sort_keys=None):
    """Tabulate a nested object into a 2-D table of cells.

    Args:
        obj: A nested indexable object addressed by per-level key paths.
        level_keys: For each level, the sequence of keys at that level.
        v_level_indexes: A sequence of level indexes mapped to table rows.
        h_level_indexes: A sequence of level indexes mapped to table columns.
        v_level_sort_keys: Optional sort keys for the vertical levels.
        h_level_sort_keys: Optional sort keys for the horizontal levels.

    Returns:
        A (table, v_key_tuples, h_key_tuples) triple where ``table`` holds
        the looked-up items, with ``MISSING`` where a key path does not
        resolve.
    """
    v_key_sorted = make_sorter(v_level_sort_keys, v_level_indexes)
    h_key_sorted = make_sorter(h_level_sort_keys, h_level_indexes)
    h_level_keys = [level_keys[level] for level in h_level_indexes]
    v_level_keys = [level_keys[level] for level in v_level_indexes]
    h_key_tuples = h_key_sorted(product(*h_level_keys))
    v_key_tuples = v_key_sorted(product(*v_level_keys))
    h_size = len(h_key_tuples)
    v_size = len(v_key_tuples)
    table = [[MISSING for _ in range(h_size)] for _ in range(v_size)]
    for h_index, h_keys in enumerate(h_key_tuples):
        for v_index, v_keys in enumerate(v_key_tuples):
            # Build the full key path from the horizontal and vertical keys.
            key_path = [None] * len(level_keys)
            merge_into_by_index(key_path, h_level_indexes, h_keys)
            merge_into_by_index(key_path, v_level_indexes, v_keys)
            # (Fixed: a manual loop here re-assigned the vertical keys,
            # duplicating the merge_into_by_index call just above.)
            item = obj
            for key in key_path:
                try:
                    item = item[key]
                except (IndexError, KeyError):
                    break
            else:  # no-break: the whole path resolved
                table[v_index][h_index] = item
    return table, v_key_tuples, h_key_tuples
|
def predict_expectation(self, X, ancillary_X=None):
    """Predict the expectation of lifetimes, :math:`E[T | x]`.

    Parameters
    ----------
    X : numpy array or DataFrame
        a (n, d) covariate numpy array or DataFrame. If a DataFrame, columns
        can be in any order. If a numpy array, columns must be in the
        same order as the training data.
    ancillary_X : numpy array or DataFrame, optional
        a (n, d) covariate numpy array or DataFrame. If a DataFrame, columns
        can be in any order. If a numpy array, columns must be in the
        same order as the training data.

    Returns
    -------
    DataFrame
        the expected lifetimes for the individuals, computed as
        ``lambda * Gamma(1 + 1/rho)``.
        (Fixed docstring: previously described the *median* lifetimes,
        copied from ``predict_median``.)

    See Also
    --------
    predict_median
    """
    lambda_, rho_ = self._prep_inputs_for_prediction_and_return_scores(X, ancillary_X)
    # Mean of the fitted distribution: lambda * Gamma(1 + 1/rho).
    return pd.DataFrame((lambda_ * gamma(1 + 1 / rho_)), index=_get_index(X))
|
def write_data(self, data, dstart=None, swap_axes=True):
    """Write ``data`` to `file`.

    Parameters
    ----------
    data : `array-like`
        Data that should be written to `file`.
    dstart : non-negative int, optional
        Offset in bytes of the start position of the written data.
        If provided, reshaping and axis swapping of ``data`` is skipped.
        For ``None``, `header_size` is used.
    swap_axes : bool, optional
        If ``True``, use the ``'mapc', 'mapr', 'maps'`` header entries
        to swap the axes in the ``data`` before writing. Use ``False``
        only if the data is already consistent with the final axis
        order.
    """
    if dstart is None:
        # Default: data goes right after the header and is reshaped to
        # the file's data shape.
        shape = self.data_shape
        dstart = int(self.header_size)
    elif dstart < 0:
        raise ValueError('`dstart` must be non-negative, got {}'
                         ''.format(dstart))
    else:
        # Explicit offset: keep the data flat (no target shape).
        shape = -1
        dstart = int(dstart)
    if dstart < self.header_size:
        raise ValueError('invalid `dstart`, resulting in absolute '
                         '`dstart` < `header_size` ({} < {})'
                         ''.format(dstart, self.header_size))
    data = np.asarray(data, dtype=self.data_dtype).reshape(shape)
    if swap_axes:
        # Need to argsort here since `data_axis_order` tells
        # "which axis comes from where", which is the inverse of what the
        # `transpose` function needs.
        data = np.transpose(data, axes=np.argsort(self.data_axis_order))
        assert data.shape == self.data_storage_shape
    # Fortran (column-major) order on disk.
    data = data.reshape(-1, order='F')
    self.file.seek(dstart)
    data.tofile(self.file)
|
def write_warc(self, resources=None, dumpfile=None):
    """Write a WARC dump file.

    WARC support is not part of ResourceSync v1.0 (Z39.99 2014) but is left
    in this library for experimentation.
    """
    # Load library late as we want to be able to run rest of code
    # without this installed
    try:
        from warc import WARCFile, WARCHeader, WARCRecord
    except:
        raise DumpError("Failed to load WARC library")
    wf = WARCFile(dumpfile, mode="w", compress=self.compress)
    # One WARC record per resource.
    for resource in resources:
        header = WARCHeader({})
        header.url = resource.uri
        header.ip_address = None
        header.date = resource.lastmod
        header.content_type = 'text/plain'
        header.result_code = 200
        header.checksum = 'aabbcc'
        header.location = self.archive_path(resource.path)
        wf.write_record(WARCRecord(header=header, payload=resource.path))
    wf.close()
    warcsize = os.path.getsize(dumpfile)
    self.logging.info("Wrote WARC file dump %s with size %d bytes" % (dumpfile, warcsize))
|
def get_arc(self, x_start, y, y_curve, x_end):
    """Render individual arc.

    x_start (int): X-coordinate of arrow start point.
    y (int): Y-coordinate of arrow start and end point.
    y_curve (int): Y-coordinate of cubic Bezier y_curve point.
    x_end (int): X-coordinate of arrow end point.
    RETURNS (unicode): Definition of the arc path ('d' attribute).
    """
    if self.compact:
        # Compact mode: plain polyline instead of a cubic Bezier curve.
        template = "M{x},{y} {x},{c} {e},{c} {e},{y}"
    else:
        template = "M{x},{y} C{x},{c} {e},{c} {e},{y}"
    return template.format(x=x_start, y=y, c=y_curve, e=x_end)
|
def handle_format_group(self, field, text):
    """Handle format group."""
    if field == '':
        # Empty field: auto-incrementing group index.
        if not self.auto and self.manual:
            raise ValueError("Cannot switch to auto format during manual format!")
        self.auto = True
        field = str(self.auto_index)
        text[0] = (_util.FMT_FIELD, field)
        self.auto_index += 1
    elif not self.manual:
        # Explicit field while not yet in manual mode.
        if self.auto:
            raise ValueError("Cannot switch to manual format during auto format!")
        self.manual = True
    self.handle_group(field, tuple(text), True)
|
def _verify_dict(self, conf):
    """Check that the configuration contains all necessary keys.

    :type conf: dict
    :rtype: None
    :raise SATOSAConfigurationError: if the configuration is incorrect
    :param conf: config to verify
    :return: None
    """
    if not conf:
        raise SATOSAConfigurationError("Missing configuration or unknown format")
    # Every mandatory key must appear in the config itself.
    for key in SATOSAConfig.mandatory_dict_keys:
        if key not in conf:
            raise SATOSAConfigurationError("Missing key '%s' in config" % key)
    # Sensitive keys may alternatively come from SATOSA_<key> env vars.
    for key in SATOSAConfig.sensitive_dict_keys:
        if key not in conf and "SATOSA_{key}".format(key=key) not in os.environ:
            raise SATOSAConfigurationError("Missing key '%s' from config and ENVIRONMENT" % key)
|
def load_results(result_files, options, run_set_id=None, columns=None, columns_relevant_for_diff=None):
    """Version of load_result for multiple input files that will be loaded concurrently.

    (Fixed: ``columns_relevant_for_diff`` previously defaulted to a single
    shared mutable ``set()`` instance.)
    """
    if columns_relevant_for_diff is None:
        columns_relevant_for_diff = set()
    # Repeat the scalar arguments so parallel.map pairs them with each file.
    return parallel.map(
        load_result,
        result_files,
        itertools.repeat(options),
        itertools.repeat(run_set_id),
        itertools.repeat(columns),
        itertools.repeat(columns_relevant_for_diff))
|
def _get_client(self):
    """Swift client

    Returns:
        swiftclient.client.Connection: client
    """
    kwargs = self._storage_parameters
    if self._unsecure:
        # Copy before modifying so the stored parameters stay untouched.
        kwargs = dict(kwargs, ssl_compression=False)
    return _swift.client.Connection(**kwargs)
|
def MultimodeCombine(pupils):
    """Return the instantaneous coherent fluxes and photometric fluxes for a
    multiway multimode combiner (no spatial filtering).
    """
    # Photometric flux of each pupil: <p, p> (real by construction).
    fluxes = [np.vdot(p, p).real for p in pupils]
    # Coherent flux for every baseline (i, j) with j < i.
    coherentFluxes = [
        np.vdot(pupils[i], pupils[j])
        for i in range(1, len(pupils))
        for j in range(i)
    ]
    return fluxes, coherentFluxes
|
def pip(self, points, sorted_col=0, radius=0):
    """Point-in-Polygon for the z=0 projection.

    This function enhances the performance of ``Polygon.contains()`` by
    verifying only the points which are inside the bounding box of the
    polygon. To do it fast, it needs the points array to be already
    sorted by one column.

    :param points: list of *(x, y, z) or (x, y)* coordinates of the
        points to check. (The z value will not be taken into account).
    :type points: ndarray (shape=(N, 2 or 3))
    :param sorted_col: Index of the sorted column (0 or 1).
    :type sorted_col: int
    :param radius: Enlarge Polygons domain by a specified quantity.
    :type radius: float
    :returns: Which points are inside the polygon.
    :rtype: ndarray (dtype=bool)

    .. warning:: By default pip considers that the set of points is
        currently sorted by the first column.
    .. warning:: This method only works if the polygon has been
        locked (:func:`lock`).
    """
    # Only x and y are used; a z column (if present) is ignored.
    xy = points[:, :2]
    n_points = xy.shape[0]
    index = np.arange(n_points, dtype=int)
    b = self.domain
    # NOTE(review): if ``self.domain`` returns a stored array rather than
    # a copy, the two lines below mutate it in place, and repeated calls
    # with a nonzero radius would enlarge it cumulatively -- confirm.
    b[0] = b[0] - radius
    b[1] = b[1] + radius
    # Slicing the sorted column: keep only points whose sorted coordinate
    # falls inside the (enlarged) bounding box.
    k = np.searchsorted(xy[:, sorted_col], (b[0, sorted_col], b[1, sorted_col] + 1e-10))
    xy = xy[k[0]:k[1]]
    index = index[k[0]:k[1]]
    # solution: exact containment test on the surviving candidates only.
    k = index[self.path.contains_points(xy, radius=radius)]
    sol = np.zeros(n_points, dtype=bool)
    sol[k] = True
    return sol
|
def execute(self, container, start=None, end=None, registers=None):
    """Execute instructions.

    Runs instructions from ``container`` starting at ``start`` (or the
    container's first address) until ``ip`` equals ``end`` or becomes
    falsy, returning the final register state and memory.
    """
    if registers:
        # Seed the CPU with a copy of the caller-provided register state.
        self.__cpu.registers = dict(registers)
    # NOTE(review): a falsy ``start`` (e.g. address 0) falls back to the
    # container's first address -- confirm that is intended.
    ip = start if start else container[0].address
    while ip and ip != end:
        try:
            instr = container.fetch(ip)
        except ReilContainerInvalidAddressError:
            logger.info("Invalid address: {:#010x}:{:#02x}".format(ip >> 8, ip & 0xff))
            raise ReilCpuInvalidAddressError()
        next_ip = self.__execute_one(instr)
        # Follow an explicit jump target if the instruction produced one,
        # otherwise fall through to the next sequential address.
        ip = next_ip if next_ip else container.get_next_address(ip)
    return dict(self.__cpu.registers), self.__mem
|
def _partition_and_stitch(self, args, func_name):
    """args is a list of tensors, to be passed to self.likelihoods.<func_name>.

    args[-1] is the 'Y' argument, which contains the indexes to self.likelihoods.

    This function splits up the args using dynamic_partition, calls the
    relevant function on the likelihoods, and re-combines the result.
    """
    # get the index from Y: the last column selects the likelihood.
    Y = args[-1]
    ind = Y[:, -1]
    ind = tf.cast(ind, tf.int32)
    # Strip the index column before passing Y on to the likelihoods.
    Y = Y[:, :-1]
    args[-1] = Y
    # split up the arguments into chunks corresponding to the relevant likelihoods
    args = zip(*[tf.dynamic_partition(X, ind, self.num_likelihoods) for X in args])
    # apply the likelihood-function to each section of the data
    with params_as_tensors_for(self, convert=False):
        funcs = [getattr(lik, func_name) for lik in self.likelihood_list]
        results = [f(*args_i) for f, args_i in zip(funcs, args)]
    # stitch the results back together into the original row order
    partitions = tf.dynamic_partition(tf.range(0, tf.size(ind)), ind, self.num_likelihoods)
    results = tf.dynamic_stitch(partitions, results)
    return results
|
def start_task(self, task_tag, skip_unresolved=False):
    """Check dependency for the given task_tag and start task.

    For dependency checking see
    :meth:`.WTaskDependencyRegistryStorage.dependency_check`. If task is
    already started then it must be stopped before it will be started again.

    :param task_tag: task to start. Any required dependencies will be
        started automatically.
    :param skip_unresolved: flag controls this method behaviour for tasks
        that could not be found. When False, method will raise an exception
        if a task tag was set in dependency and the related task wasn't
        found in registry. When True that unresolvable task will be omitted.
    :return: None
    """
    # Already running -- nothing to do.
    if self.started_tasks(task_registry_id=task_tag) is not None:
        return
    task_cls = self.tasks_by_tag(task_tag)
    if task_cls is None:
        raise RuntimeError("Task '%s' wasn't found" % task_tag)
    self.dependency_check(task_cls, skip_unresolved=skip_unresolved)

    def launch(cls):
        # Depth-first: start every not-yet-running dependency first.
        for tag in cls.__dependency__:
            if self.started_tasks(task_registry_id=tag) is not None:
                continue
            dep_cls = self.tasks_by_tag(tag)
            if dep_cls is not None:
                launch(dep_cls)
        self.__started.append(cls.start_dependent_task())

    launch(task_cls)
|
def group_records_by_category(self):
    """Return the records grouped by the category of their source.

    The return value is a dict; a key in this dict is a category
    and the value is a list of all the records with this category.
    """
    # Fixed: a leftover ``Source.validate_categories(categories)`` call
    # referenced the undefined name ``categories`` and raised NameError
    # unconditionally.
    key_function = lambda record: record.source.category
    return self.group_records(key_function)
|
def get_cpu_info_json():
    '''Returns the CPU info by using the best sources of information for your OS.
    Returns the result in a json string'''
    import json

    output = None
    if getattr(sys, 'frozen', False):
        # Running under pyinstaller: safe to gather the info in-process.
        info = _get_cpu_info_internal()
        output = json.dumps(info)
        output = "{0}".format(output)
    else:
        # Not under pyinstaller: gather in a child process. This is done
        # because multiprocessing has a design flaw that causes non-main
        # programs to run multiple times on Windows.
        from subprocess import Popen, PIPE
        command = [sys.executable, __file__, '--json']
        p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
        output = p1.communicate()[0]
        if p1.returncode != 0:
            return "{}"
        if not IS_PY2:
            output = output.decode(encoding='UTF-8')
    return output
|
def fastq_iter(handle, header=None):
    """Iterate over FASTQ file and return FASTQ entries

    Args:
        handle (file): FASTQ file handle, can be any iterator so long as it
            it returns subsequent "lines" of a FASTQ entry

        header (str): Header line of next FASTQ entry, if 'handle' has been
            partially read and you want to start iterating at the next entry,
            read the next FASTQ header and pass it to this variable when
            calling fastq_iter. See 'Examples.'

    Yields:
        FastqEntry: class containing all FASTQ data

    Raises:
        IOError: If FASTQ entry doesn't start with '@'

    Examples:
        The following two examples demonstrate how to use fastq_iter.
        Note: These doctests will not pass, examples are only in doctest
        format as per convention. bio_utils uses pytests for testing.

        >>> for entry in fastq_iter(open('test.fastq')):
        ...     print(entry.id)  # Print FASTQ id
        ...     print(entry.description)  # Print FASTQ description
        ...     print(entry.sequence)  # Print FASTQ sequence
        ...     print(entry.quality)  # Print FASTQ quality scores
        ...     print(entry.write())  # Print full FASTQ entry

        >>> fastq_handle = open('test.fastq')
        >>> next(fastq_handle)  # Skip first entry header
        >>> next(fastq_handle)  # Skip first entry sequence
        >>> next(fastq_handle)  # Skip line with '+'
        >>> next(fastq_handle)  # Skip first entry quality scores
        >>> first_line = next(fastq_handle)  # Read second entry header
        >>> for entry in fastq_iter(fastq_handle, header=first_line):
        ...     print(entry.id)  # Print FASTQ id
        ...     print(entry.description)  # Print FASTQ description
        ...     print(entry.sequence)  # Print FASTQ sequence
        ...     print(entry.quality)  # Print FASTQ quality scores
        ...     print(entry.write())  # Print full FASTQ entry
    """
    # Speed tricks: bind methods to locals to reduce attribute lookups and
    # function-call overhead inside the tight parsing loop below.
    append = list.append
    join = str.join
    strip = str.strip
    next_line = next
    if header is None:
        # Read first FASTQ entry header.
        # NOTE(review): under PEP 479 (Python 3.7+), a StopIteration raised
        # here (empty handle) surfaces as RuntimeError because this is a
        # generator -- confirm callers never pass an exhausted handle.
        header = next(handle)
    # Check if input is text or bytestream; for bytes, wrap `next` so every
    # subsequent line is decoded to str before processing.
    if (isinstance(header, bytes)):
        def next_line(i):
            return next(i).decode('utf-8')
        header = strip(header.decode('utf-8'))
    else:
        header = strip(header)
    try:  # Manually construct a for loop to improve speed by using 'next'
        while True:  # Loop until StopIteration Exception raised
            line = strip(next_line(handle))
            data = FastqEntry()
            if not header[0] == '@':
                raise IOError('Bad FASTQ format: no "@" at beginning of line')
            # Split the header into id and description; absence of a space
            # means there is no description.
            try:
                data.id, data.description = header[1:].split(' ', 1)
            except ValueError:  # No description
                data.id = header[1:]
                data.description = ''
            # Obtain sequence: accumulate lines until the '+' separator
            # (or a '#' line) is reached.
            sequence_list = []
            while line and not line[0] == '+' and not line[0] == '#':
                append(sequence_list, line)
                line = strip(next_line(handle))
            data.sequence = join('', sequence_list)
            # Skip line containing only '+' by reading the next line.
            line = strip(next_line(handle))
            # Obtain quality scores: accumulate until their total length
            # matches the sequence length (quality may span several lines).
            quality_list = []
            seq_len = len(data.sequence)
            qual_len = 0
            while line and qual_len < seq_len:
                append(quality_list, line)
                qual_len += len(line)
                line = strip(next_line(handle))
                # Raises StopIteration at EOF
            # Store current line (the next entry's header) so it's not lost
            # next iteration.
            header = line
            data.quality = join('', quality_list)
            yield data
    except StopIteration:  # Yield last FASTQ entry
        data.quality = join('', quality_list)
        yield data
|
def start_receive(self, fd, data=None):
    """Arrange for :meth:`poll` to yield ``data`` when ``fd`` is readable.

    If ``data`` is falsy, the file descriptor itself is yielded instead.
    """
    payload = data or fd
    self._rfds[fd] = (payload, self._generation)
    self._update(fd)
|
def refresh_actions(self):
    """Rebuild the plugin's options menu.

    The dock/undock entries appended at the end depend on whether the
    plugin is currently docked (no undocked window) or undocked.
    """
    self.options_menu.clear()
    # Decide which dock-related actions to append.
    if self.undocked_window is None:
        extra_actions = [MENU_SEPARATOR, self.undock_action,
                         self.close_plugin_action]
    else:
        extra_actions = [MENU_SEPARATOR, self.dock_action]
    self.plugin_actions = self.get_plugin_actions() + extra_actions
    add_actions(self.options_menu, self.plugin_actions)
|
def remapScipy(im, coords):
    """Remap an image using SciPy. See :func:`remap` for parameters."""
    height, width = im.shape[0], im.shape[1]
    # Switch the coordinate pairs from (x, y) to the (y, x) order that
    # map_coordinates expects.
    yx = coords[:, :, ::-1]
    # Template of shape (h, w, 3): spatial coordinates plus a zeroed slot
    # for the colour-channel index.
    base = np.zeros((height, width, 3))
    base[:, :, :2] = yx
    # One copy per colour channel, each tagged with its channel index,
    # giving shape (h, w, 3, 3).
    stacked = np.empty((height, width, 3, 3))
    for channel in range(3):
        stacked[:, :, channel] = base
        stacked[:, :, channel, 2] = channel
    # Move the coordinate axis to the front: (3, h, w, 3).
    reordered = np.rollaxis(stacked, 3)
    return map_coordinates(im, reordered, order=1)
|
def _get_img_attrs ( self , style , kwargs ) :
"""Get the attributes of an an < img > tag for this image , hidpi - aware"""
|
# Get the 1x and 2x renditions
img_1x , img_2x , size = self . _get_renditions ( kwargs )
return { 'src' : img_1x , 'width' : size [ 0 ] , 'height' : size [ 1 ] , 'srcset' : "{} 1x, {} 2x" . format ( img_1x , img_2x ) if img_1x != img_2x else None , 'style' : ';' . join ( style ) if style else None , 'class' : kwargs . get ( 'class' , kwargs . get ( 'img_class' ) ) , 'id' : kwargs . get ( 'img_id' ) }
|
def adjust_for_triggers(self):
    """Remove trigger-related plugins when needed.

    If there are no triggers defined, the feature is assumed disabled and
    all trigger-related plugins are removed.

    If triggers are defined but this is a custom base image (or a FROM
    scratch image), the trigger-related plugins do not apply and the
    triggers themselves are deleted, since triggering a base image rebuild
    is not a valid scenario.
    """
    triggers = self.template['spec'].get('triggers', [])
    trigger_plugins = [
        ("prebuild_plugins", "check_and_set_rebuild"),
        ("prebuild_plugins", "stop_autorebuild_if_disabled"),
    ]
    # ``msg`` doubles as the removal flag: it is set exactly when the
    # plugins should be stripped.
    msg = None
    if triggers and (self.is_custom_base_image() or self.is_from_scratch_image()):
        if self.is_custom_base_image():
            msg = "removing %s from request because custom base image"
        elif self.is_from_scratch_image():
            msg = 'removing %s from request because FROM scratch image'
        del self.template['spec']['triggers']
    elif not triggers:
        msg = "removing %s from request because there are no triggers"
    if msg is not None:
        for when, which in trigger_plugins:
            logger.info(msg, which)
            self.dj.remove_plugin(when, which)
|
def do_ls(self, nothing=''):
    """List files in the current remote directory.

    Directories are printed first, colored blue via ANSI escapes; plain
    files follow uncolored.
    """
    blue, reset_color = "\033[0;34m", "\033[0m"
    for directory in self.dirs:
        self.stdout.write(blue + ('%s\n' % directory) + reset_color)
    for filename in self.files:
        self.stdout.write('%s\n' % filename)
|
def _describe_atom ( topology , index ) :
"""Returns a string describing the given atom
: param topology :
: param index :
: return :"""
|
at = topology . atom ( index )
if topology . n_chains > 1 :
return "%s %i %s %i %i" % ( at . residue . name , at . residue . resSeq , at . name , at . index , at . residue . chain . index )
else :
return "%s %i %s %i" % ( at . residue . name , at . residue . resSeq , at . name , at . index )
|
def _example_broker_queue(quote_ctx):
    """Fetch the broker queue and print broker IDs, names and levels
    for both the bid and ask sides of each stock."""
    stock_codes = ["HK.00700"]
    # Subscribe to broker-queue pushes first; abort on any failure.
    for code in stock_codes:
        status, data = quote_ctx.subscribe(code, ft.SubType.BROKER)
        if status != ft.RET_OK:
            print(data)
            exit()
    for code in stock_codes:
        status, bid_data, ask_data = quote_ctx.get_broker_queue(code)
        if status != ft.RET_OK:
            print(bid_data)
            exit()
        print("%s BROKER" % code)
        print(ask_data)
        print("\n\n")
        print(bid_data)
        print("\n\n")
|
def reset(self, context):
    """Force close and discard of the underlying connection, so the next
    use gets a fresh reference. Used for 'meta: reset_connection' or when
    some other error is detected."""
    LOG.debug('%r.reset(%r)', self, context)
    lock = self._lock
    lock.acquire()
    try:
        self._shutdown_unlocked(context)
    finally:
        lock.release()
|
def is_interesting(entry):
    """Is this entry interesting?

    ``entry`` is an XML node representing one entry of the ``svn status``
    XML output, e.g.::

        <entry path="unchanged.txt">
          <wc-status item="normal" revision="1" props="none">
            <commit revision="1">...</commit>
          </wc-status>
        </entry>

    Entries for the working-copy root ('.'), unversioned files and SVN
    externals are not interesting; everything else is.
    """
    path = entry.get('path')
    if path == '.':
        return False
    wc_status = entry.find('wc-status')
    if wc_status is None:
        warning('svn status --xml parse error: <entry path="%s"> without'
                ' <wc-status>' % path)
        return False
    # For SVN externals we get two entries: one mentioning the existence
    # of the external, and one about its status -- neither is interesting,
    # nor are unversioned files.
    return wc_status.get('item') not in ('unversioned', 'external')
|
def port_profile_vlan_profile_switchport_access_mac_group_vlan_classification_access_vlan_access_mac_group(self, **kwargs):
    """Build and submit the NETCONF config element for a port-profile's
    access MAC-group VLAN classification. (Auto-generated code.)

    Required kwargs: ``name``, ``access_vlan_id``, ``access_mac_group``.
    An optional ``callback`` kwarg overrides the instance callback.
    """
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile",
                                 xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(port_profile, "name").text = kwargs.pop('name')
    vlan_profile = ET.SubElement(port_profile, "vlan-profile")
    switchport = ET.SubElement(vlan_profile, "switchport")
    classification = ET.SubElement(switchport, "access-mac-group-vlan-classification")
    access = ET.SubElement(classification, "access")
    vlan = ET.SubElement(access, "vlan")
    ET.SubElement(vlan, "access-vlan-id").text = kwargs.pop('access_vlan_id')
    ET.SubElement(vlan, "access-mac-group").text = kwargs.pop('access_mac_group')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def register_app_activity():
    """Create watchers for the models declared in settings.ACTIVITY_MONITOR_MODELS.

    Each resolvable model is handed to ``Activity.objects.follow_model()``
    (see managers.py); entries whose ContentType does not exist are skipped.
    """
    from django.conf import settings
    from django.contrib.contenttypes.models import ContentType
    from .models import Activity

    if not hasattr(settings, 'ACTIVITY_MONITOR_MODELS'):
        return

    for entry in settings.ACTIVITY_MONITOR_MODELS:
        try:
            app_label, model_name = entry['model'].split('.', 1)
            ctype = ContentType.objects.get(app_label=app_label, model=model_name)
            Activity.objects.follow_model(ctype.model_class())
        except ContentType.DoesNotExist:
            # Silently ignore entries pointing at unknown content types.
            pass
|
def change_column_length(table: "Table", column: "Column", length: int, engine: "Engine") -> None:
    """Widen ``column`` on ``table`` to ``length`` if it is currently shorter.

    Updates the in-memory SQLAlchemy column type and issues a matching
    ``ALTER TABLE``. Nothing happens when the column is already wide
    enough -- previously the ALTER was executed unconditionally, causing a
    pointless DDL round trip even when no change was needed.

    :param table: table containing the column (its name is interpolated
        into the DDL)
    :param column: column whose length may be increased
    :param length: desired minimum length
    :param engine: engine used to compile the type and execute the DDL
    """
    if column.type.length >= length:
        return
    print("Changing length of {} from {} to {}".format(column, column.type.length, length))
    column.type.length = length
    # Identifiers come from schema objects, not user input; still, this is
    # plain string interpolation -- keep it away from untrusted names.
    ddl = 'ALTER TABLE {table} ALTER COLUMN {column_name} TYPE {column_type}'.format(
        table=table,
        column_name=column.name,
        column_type=column.type.compile(engine.dialect),
    )
    engine.execute(ddl)
|
def ibis_schema_apply_to(schema, df):
    """Apply an Ibis schema to a pandas DataFrame.

    Parameters
    ----------
    schema : ibis.schema.Schema
    df : pandas.DataFrame

    Returns
    -------
    df : pandas.DataFrame

    Notes
    -----
    Mutates `df` in place and returns it.
    """
    for name, ibis_type in schema.items():
        target_dtype = ibis_type.to_pandas()
        series = df[name]
        current_dtype = series.dtype
        try:
            needs_cast = target_dtype != current_dtype
        except TypeError:
            # Some dtypes coming from pandas refuse comparison;
            # assume a conversion is needed.
            needs_cast = True
        if needs_cast or ibis_type == dt.string:
            df[name] = convert(current_dtype, ibis_type, series)
    return df
|
def calc_wada_waes_v1(self):
    """Calculate the actual water release from the snow cover.

    Required control parameters:
      |NHRU|
      |Lnk|
      |PWMax|

    Required flux sequences:
      |NBes|

    Calculated flux sequence:
      |WaDa|

    Updated state sequence:
      |WAeS|

    Basic equations:
      :math:`\\frac{dWAeS}{dt} = NBes - WaDa`
      :math:`WAeS \\leq PWMax \\cdot WATS`

    Examples:

        For simplicity, the threshold parameter |PWMax| is set to a value
        of two for each of the six initialized HRUs.  Thus, snow cover can
        hold as much liquid water as it contains frozen water.  Stand
        precipitation is also always set to the same value, but the initial
        conditions of the snow cover are varied:

        >>> from hydpy.models.lland import *
        >>> parameterstep('1d')
        >>> nhru(6)
        >>> lnk(FLUSS, SEE, ACKER, ACKER, ACKER, ACKER)
        >>> pwmax(2.0)
        >>> fluxes.nbes = 1.0
        >>> states.wats = 0.0, 0.0, 0.0, 1.0, 1.0, 1.0
        >>> states.waes = 1.0, 1.0, 0.0, 1.0, 1.5, 2.0
        >>> model.calc_wada_waes_v1()
        >>> states.waes
        waes(0.0, 0.0, 0.0, 2.0, 2.0, 2.0)
        >>> fluxes.wada
        wada(1.0, 1.0, 1.0, 0.0, 0.5, 1.0)

        Note the special cases of the first two HRUs of type |FLUSS| and
        |SEE|.  For water areas, stand precipitaton |NBes| is generally
        passed to |WaDa| and |WAeS| is set to zero.  For all other land
        use classes (of which only |ACKER| is selected), only the amount
        of |NBes| exceeding the actual snow holding capacity is passed
        to |WaDa|.
    """
    # fastaccess objects expose the parameter/sequence values as plain
    # attributes/arrays for speed inside the HRU loop.
    con = self.parameters.control.fastaccess
    flu = self.sequences.fluxes.fastaccess
    sta = self.sequences.states.fastaccess
    for k in range(con.nhru):
        if con.lnk[k] in (WASSER, FLUSS, SEE):
            # Water areas hold no snow: route all stand precipitation
            # straight to WaDa and empty the snow water store.
            sta.waes[k] = 0.
            flu.wada[k] = flu.nbes[k]
        else:
            # Add stand precipitation to the snow store, then release the
            # amount exceeding the holding capacity PWMax * WATS.
            sta.waes[k] += flu.nbes[k]
            flu.wada[k] = max(sta.waes[k] - con.pwmax[k] * sta.wats[k], 0.)
            sta.waes[k] -= flu.wada[k]
|
def write_event(self, event):
    """Append ``event`` to the file.

    Raises:
        TypeError: if ``event`` is not an ``event_pb2.Event`` proto.
    """
    # Only event_pb2.Event protos can be serialized into the log.
    if not isinstance(event, event_pb2.Event):
        raise TypeError("expected an event_pb2.Event proto, "
                        " but got %s" % type(event))
    serialized = event.SerializeToString()
    return self._write_serialized_event(serialized)
|
def process_paper_helper(model_name, pmid, start_time_local):
    """Process a paper via the local or the AWS service, trapping any
    unexpected error.

    :param model_name: name of the model used for local processing
    :param pmid: identifier of the paper to process
    :param start_time_local: start timestamp forwarded to the AWS path
    :returns: tuple ``(rp, txt_format)``, or ``(None, None)`` when
        processing raised
    """
    try:
        if not aws_available:
            rp, txt_format = process_paper(model_name, pmid)
        else:
            rp, txt_format = process_paper_aws(pmid, start_time_local)
    except Exception:
        # Previously a bare ``except:`` -- that also swallowed SystemExit
        # and KeyboardInterrupt; catch Exception so those still propagate.
        logger.exception('uncaught exception while processing %s', pmid)
        return None, None
    return rp, txt_format
|
async def is_change_done(self, zone, change_id):
    """Check if a DNS change has completed.

    Args:
        zone (str): DNS zone of the change.
        change_id (str): Identifier of the change.
    Returns:
        Boolean: True once the change status equals ``DNS_CHANGES_DONE``.
    """
    zone_id = self.get_managed_zone(zone)
    change_url = '{}/managedZones/{}/changes/{}'.format(
        self._base_url, zone_id, change_id)
    payload = await self.get_json(change_url)
    return payload['status'] == self.DNS_CHANGES_DONE
|
def url_encode_stream(obj, stream=None, charset='utf-8', encode_keys=False, sort=False, key=None, separator='&'):
    """Like :meth:`url_encode` but writes the results to a stream object.
    If ``stream`` is `None`, a generator over all encoded pairs is returned.

    .. versionadded:: 0.8

    :param obj: the object to encode into a query string.
    :param stream: a stream to write the encoded object into, or `None` to
                   get back an iterator over the encoded pairs (in which
                   case the separator argument is ignored).
    :param charset: the charset of the query string.
    :param encode_keys: set to `True` if you have unicode keys.
    :param sort: set to `True` to sort parameters by ``key``.
    :param separator: the separator used between pairs.
    :param key: optional sort function; see :func:`sorted` for details.
    """
    pairs = _url_encode_impl(obj, charset, encode_keys, sort, key)
    if stream is None:
        return pairs
    first = True
    for chunk in pairs:
        # Write the separator before every chunk except the first one.
        if not first:
            stream.write(separator)
        first = False
        stream.write(chunk)
|
def duty_cycle(self):
    """16-bit value describing how much of one cycle is high (1) versus
    low (0): 0xffff is always high, 0 always low, 0x7fff half and half."""
    pwm_pair = self._pca.pwm_regs[self._index]
    # An ON value of 0x1000 encodes the hardware "always on" state.
    if pwm_pair[0] == 0x1000:
        return 0xffff
    # Scale the 12-bit OFF count up to the 16-bit public range.
    return pwm_pair[1] << 4
|
def ceil(self):
    """Return a new Point with `x` and `y` rounded up to whole integers."""
    ceil_x = int(math.ceil(self.x))
    ceil_y = int(math.ceil(self.y))
    return Point(ceil_x, ceil_y)
|
def ssh_known_host_key(host, application_name, user=None):
    """Return the first entry in known_hosts for host.

    :param host: hostname to lookup in file.
    :type host: str
    :param application_name: Name of application eg nova-compute-something
    :type application_name: str
    :param user: The user that the ssh asserts are for.
    :type user: str
    :returns: Host key
    :rtype: str or None
    """
    cmd = ['ssh-keygen', '-f', known_hosts(application_name, user),
           '-H', '-F', host]
    try:
        # The first line of output is like '# Host xx found: line 1 type RSA',
        # which should be excluded.
        output = subprocess.check_output(cmd)
    except subprocess.CalledProcessError as e:
        # RC of 1 seems to be legitimate for most ssh-keygen -F calls.
        if e.returncode == 1:
            output = e.output
        else:
            raise
    if isinstance(output, bytes):
        # check_output() returns bytes on Python 3; the str split below
        # raised TypeError before this decode was added.
        output = output.decode('utf-8')
    output = output.strip()
    if output:
        # Bug #1500589 cmd has 0 rc on precise if entry not present
        lines = output.split('\n')
        if len(lines) >= 1:
            return lines[0]
    return None
|
def source_attr(attr_name):
    """Creates a getter that will drop the current value
    and retrieve the source's attribute with specified name.

    @param attr_name: the name of an attribute belonging to the source.
    @type attr_name: str
    """
    # The inner function deliberately shadows the factory's name; it
    # ignores the incoming value and reads the named attribute off the
    # model's source object instead.
    def source_attr(_value, context, **_params):
        value = getattr(context["model"].source, attr_name)
        # NOTE(review): `_attr` is not defined anywhere in this block's
        # visible scope -- confirm it resolves at runtime (it may be a
        # module-level helper, or this may have been meant to be `value`).
        return _attr(value)
    return source_attr
|
def neighbors(self):
    """A dictionary with neighbors.

    The dictionary has the form ``{vertexX: frozenset((vertexY1, ...)), ...}``,
    meaning vertexX and vertexY1 are connected, etc.  Connections are
    symmetric, so each edge contributes an entry on both endpoints.
    """
    adjacency = {vertex: [] for vertex in range(self.num_vertices)}
    for u, w in self.edges:
        adjacency[u].append(w)
        adjacency[w].append(u)
    # Freeze the neighbor lists so callers get immutable sets.
    return {vertex: frozenset(nbrs) for vertex, nbrs in adjacency.items()}
|
def range(self, dimension, data_range=True, dimension_range=True):
    """Return the lower and upper bounds of values along dimension.

    Args:
        dimension: The dimension to compute the range on.
        data_range (bool): Compute range from data values
        dimension_range (bool): Include Dimension ranges
            Whether to include Dimension range and soft_range
            in range calculation

    Returns:
        Tuple containing the lower and upper bound
    """
    dimension = self.get_dimension(dimension)
    # No resolvable dimension, or the caller disabled both sources of
    # bounds: nothing to compute.
    if dimension is None or (not data_range and not dimension_range):
        return (None, None)
    # An explicitly declared, fully finite Dimension.range wins outright
    # when dimension ranges are requested.
    elif all(util.isfinite(v) for v in dimension.range) and dimension_range:
        return dimension.range
    elif data_range:
        if dimension in self.kdims + self.vdims:
            # The dimension belongs to this element: scan its values.
            dim_vals = self.dimension_values(dimension.name)
            lower, upper = util.find_range(dim_vals)
        else:
            # Otherwise traverse contained objects declaring the dimension
            # and merge their individual ranges into one.
            dname = dimension.name
            match_fn = lambda x: dname in x.kdims + x.vdims
            range_fn = lambda x: x.range(dname)
            ranges = self.traverse(range_fn, [match_fn])
            lower, upper = util.max_range(ranges)
    else:
        lower, upper = (np.NaN, np.NaN)
    if not dimension_range:
        return lower, upper
    # Combine the data bounds with the Dimension's declared range and
    # soft_range before returning.
    return util.dimension_range(lower, upper, dimension.range, dimension.soft_range)
|
def handle_line(self, frame, arg):
    """Callback invoked when execution stops or breaks at this line."""
    message = 'Stopping at line %s' % pretty_frame(frame)
    log.info(message)
    self.interaction(frame)
|
def user_view_task(self):
    """Open the task currently selected in the user task tree view.

    :returns: None
    :rtype: None
    :raises: None
    """
    if not self.cur_user:
        return
    index = self.user_task_treev.currentIndex()
    node = index.internalPointer()
    if node is None:
        return
    candidate = node.internal_data()
    # Only real Task instances can be viewed.
    if isinstance(candidate, djadapter.models.Task):
        self.view_task(candidate)
|
def enable_file_approve(self, enable=True):
    """Enable or disable the 'Approve file' menu item, used when
    entering or leaving save mode."""
    menu_item = self.shortcut2menuitem[_("&Approve file")]
    menu_item.Enable(enable)
|
def update_groups_for_user(self, user: User, state: State = None):
    """Synchronise Group memberships for the given user's state.

    :param user: User to update for
    :param state: State to update the user for; defaults to the user's
        current profile state
    :return: None
    """
    if state is None:
        state = user.profile.state
    # Grant the groups configured for the user's current state...
    for config in self.filter(states=state):
        config.update_group_membership_for_user(user)
    # ...and strip any groups tied to other states.
    for config in self.exclude(states=state):
        config.remove_user_from_alliance_groups(user)
        config.remove_user_from_corp_groups(user)
|
def pin_chat_message(self, *args, **kwargs):
    """Delegate to :func:`pin_chat_message`, applying this instance's
    keyword overrides before running the request."""
    merged_kwargs = self._merge_overrides(**kwargs)
    return pin_chat_message(*args, **merged_kwargs).run()
|
def _setup_firefox(self, capabilities):
    """Set up a local Firefox webdriver.

    :param capabilities: capabilities object
    :returns: a new local Firefox driver
    """
    gecko_driver = None
    if capabilities.get("marionette"):
        gecko_driver = self.config.get('Driver', 'gecko_driver_path')
        self.logger.debug("Gecko driver path given in properties: %s", gecko_driver)
    # Optional custom Firefox binary.
    firefox_binary = self.config.get_optional('Firefox', 'binary')
    firefox_options = Options()
    if self.config.getboolean_optional('Driver', 'headless'):
        self.logger.debug("Running Firefox in headless mode")
        firefox_options.add_argument('-headless')
    self._add_firefox_arguments(firefox_options)
    if firefox_binary:
        firefox_options.binary = firefox_binary
    log_path = os.path.join(DriverWrappersPool.output_directory, 'geckodriver.log')
    try:
        # Selenium 3 accepts a log_path argument.
        return webdriver.Firefox(firefox_profile=self._create_firefox_profile(),
                                 capabilities=capabilities,
                                 executable_path=gecko_driver,
                                 firefox_options=firefox_options,
                                 log_path=log_path)
    except TypeError:
        # Selenium 2 does not know log_path; retry without it.
        return webdriver.Firefox(firefox_profile=self._create_firefox_profile(),
                                 capabilities=capabilities,
                                 executable_path=gecko_driver,
                                 firefox_options=firefox_options)
|
def render(file):
    """Generate the result HTML for a notebook file.

    :returns: tuple ``(body, resources)`` from nbconvert's HTMLExporter
    """
    stream = file.open()
    raw = stream.read()
    stream.close()
    notebook = nbformat.reads(raw.decode('utf-8'), as_version=4)
    exporter = HTMLExporter()
    exporter.template_file = 'basic'
    body, resources = exporter.from_notebook_node(notebook)
    return body, resources
|
def xyz2angle(x, y, z):
    """Convert cartesian coordinates to (azimuth, zenith) angles in degrees."""
    azimuth = xu.rad2deg(xu.arctan2(x, y))
    horizontal = xu.sqrt(x ** 2 + y ** 2)
    zenith = 90 - xu.rad2deg(xu.arctan2(z, horizontal))
    return azimuth, zenith
|
async def can_cast(self, unit: Unit, ability_id: AbilityId, target: Optional[Union[Unit, Point2, Point3]] = None, only_check_energy_and_cooldown: bool = False, cached_abilities_of_unit: List[AbilityId] = None) -> bool:
    """Tests if a unit has an ability available and enough energy to cast it.

    See data_pb2.py (line 161) for the numbers 1-5 to make sense
    """
    assert isinstance(unit, Unit)
    assert isinstance(ability_id, AbilityId)
    assert isinstance(target, (type(None), Unit, Point2, Point3))
    # check if unit has enough energy to cast or if ability is on cooldown
    if cached_abilities_of_unit:
        abilities = cached_abilities_of_unit
    else:
        # Query the game for the unit's currently available abilities.
        abilities = (await self.get_available_abilities([unit]))[0]
    if ability_id in abilities:
        if only_check_energy_and_cooldown:
            return True
        cast_range = self._game_data.abilities[ability_id.value]._proto.cast_range
        ability_target = self._game_data.abilities[ability_id.value]._proto.target
        # Check if target is in range (or is a self cast like stimpack).
        # NOTE: operator precedence makes this read as
        #   (ability_target == 1) or (== PointOrNone and point and in range)
        # i.e. a target type of 1 needs no target/range check at all.
        if ability_target == 1 or ability_target == Target.PointOrNone.value and isinstance(target, (Point2, Point3)) and unit.distance_to(target) <= cast_range:  # cant replace 1 with "Target.None.value" because ".None" doesnt seem to be a valid enum name
            return True
        # Check if able to use ability on a unit
        elif ability_target in {Target.Unit.value, Target.PointOrUnit.value} and isinstance(target, Unit) and unit.distance_to(target) <= cast_range:
            return True
        # Check if able to use ability on a position
        elif ability_target in {Target.Point.value, Target.PointOrUnit.value} and isinstance(target, (Point2, Point3)) and unit.distance_to(target) <= cast_range:
            return True
    return False
|
def parse(self) -> typing.Union[list, dict, None]:
    """Parse the BYML and get the root node with all children.

    Returns None when the document declares no root node.
    """
    root_offset = self._read_u32(12)
    if root_offset == 0:
        # A zero offset means the document is empty.
        return None
    root_type = self._data[root_offset]
    if not _is_container_type(root_type):
        raise ValueError("Invalid root node: expected array or dict, got type 0x%x" % root_type)
    # _parse_node is handed the offset of the offset field (12);
    # presumably it re-reads the u32 there itself -- confirm against
    # its implementation.
    return self._parse_node(root_type, 12)
|
def sorted_bfs_edges(G, source=None):
    """Produce edges in a breadth-first-search starting at source.

    Neighbors appear in the order a linguist would expect in a syntax
    tree: children are visited sorted by their horizontal position.  Only
    edges expressing dominance or spanning relations are produced; edges
    for pointing or precedence relations are ignored.

    Parameters
    ----------
    G : DiscourseDocumentGraph
    source : node
        Starting node for the search; defaults to the graph root.

    Returns
    -------
    edges : generator
        A generator of (parent, child) edges in BFS order.
    """
    if source is None:
        source = G.root
    positions = horizontal_positions(G, source)
    sort_key = lambda node: positions[node]

    seen = set([source])
    pending = deque(
        [(source, iter(sorted(get_child_nodes(G, source), key=sort_key)))])
    while pending:
        parent, child_iter = pending[0]
        try:
            child = next(child_iter)
        except StopIteration:
            # This parent's children are exhausted; move on.
            pending.popleft()
            continue
        if child in seen:
            continue
        yield parent, child
        seen.add(child)
        grandchildren = get_child_nodes(G, child)
        pending.append((child, iter(sorted(grandchildren, key=sort_key))))
|
def dinfFlowDirection(self, flow_dir_grid, slope_grid, pit_filled_elevation_grid=None):
    """Calculate flow direction with TauDEM's D-infinity method.

    Writes a flow-direction grid and a slope grid; both receive the
    projection of the pit-filled elevation input.
    """
    log("PROCESS: DinfFlowDirection")
    if pit_filled_elevation_grid:
        self.pit_filled_elevation_grid = pit_filled_elevation_grid
    # Construct and run the TauDEM command line.
    cmd = [
        os.path.join(self.taudem_exe_path, 'dinfflowdir'),
        '-fel', self.pit_filled_elevation_grid,
        '-ang', flow_dir_grid,
        '-slp', slope_grid,
    ]
    self._run_mpi_cmd(cmd)
    # Give the generated rasters the same projection as the input DEM.
    for output_grid in (flow_dir_grid, slope_grid):
        self._add_prj_file(self.pit_filled_elevation_grid, output_grid)
|
def show(self, filter=None):
    """Print the list of commands currently in the queue.

    If ``filter`` is given, print only commands whose name matches it.
    Arrays are summarised by shape; long strings are truncated.
    """
    for command in self._commands:
        # Skip nil commands
        if command[0] is None:
            continue
        if filter and command[0] != filter:
            continue
        rendered = []
        for element in command:
            if isinstance(element, np.ndarray):
                rendered.append('array %s' % str(element.shape))
            elif isinstance(element, str):
                text = element.strip()
                if len(text) > 20:
                    text = text[:18] + '... %i lines' % (element.count('\n') + 1)
                rendered.append(text)
            else:
                rendered.append(element)
        print(tuple(rendered))
|
def _set_aaa_config(self, v, load=False):
    """Setter method for aaa_config, mapped from YANG variable /aaa_config (container)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_aaa_config is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_aaa_config() directly.
    """
    # Unwrap values that carry their own YANG type constructor.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value in the generated YANG container class; raises
        # if v is not compatible with the container definition.
        t = YANGDynClass(v, base=aaa_config.aaa_config, is_container='container', presence=False, yang_name="aaa-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'sort-priority': u'16'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """aaa_config must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=aaa_config.aaa_config, is_container='container', presence=False, yang_name="aaa-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'sort-priority': u'16'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)""", })
    self.__aaa_config = t
    # Notify the parent object, if it supports change notification.
    if hasattr(self, '_set'):
        self._set()
|
def fn_minimum_argcount(callable):
    """Return the minimum number of arguments that must be provided for
    the call to succeed."""
    fn = get_fn(callable)
    total = fn_available_argcount(callable)
    defaults = fn.__defaults__
    if defaults is None:
        # No defaults at all: every available argument is required.
        return total
    return total - len(defaults)
|
def owner_search_fields(self):
    """Return the names of all CharFields on the User model except
    'password'.

    For the built-in User model that means username, first_name,
    last_name and email.
    """
    try:
        from django.contrib.auth import get_user_model
    except ImportError:
        # Django < 1.5 has no get_user_model.
        from django.contrib.auth.models import User
    else:
        User = get_user_model()
    names = []
    for field in User._meta.fields:
        if isinstance(field, models.CharField) and field.name != 'password':
            names.append(field.name)
    return names
|
def replace_exceptions(old_to_new_exceptions: Dict[Type[BaseException], Type[BaseException]]) -> Callable[..., Any]:
    """Decorator factory: re-raise mapped exception types as their
    replacements, chaining the original as the cause."""
    catchable = tuple(old_to_new_exceptions.keys())

    def decorator(to_wrap: Callable[..., Any]) -> Callable[..., Any]:
        @functools.wraps(to_wrap)
        # String type b/c pypy3 throws SegmentationFault with Iterable as arg on nested fn
        # Ignore so we don't have to import `Iterable`
        def wrapper(*args: Iterable[Any], **kwargs: Dict[str, Any]) -> Callable[..., Any]:
            try:
                return to_wrap(*args, **kwargs)
            except catchable as err:
                try:
                    raise old_to_new_exceptions[type(err)] from err
                except KeyError:
                    # err's exact type (e.g. a subclass) has no mapping.
                    raise TypeError("could not look up new exception to use for %r" % err) from err
        return wrapper
    return decorator
|
def save_file_list(key, *files_refs):
    """Serialize file/reference specifications into a JSON object.

    Each positional argument has the form ``<file-path>[:<ref1>,<ref2>,...]``
    where the colon and reference list are optional.  The JSON object is of
    the form ``{key: [{"file": path}]}`` or
    ``{key: [{"file": path, "refs": [refs[0], refs[1], ...]}]}``.

    An error object (via ``error()``) is returned when a specification has
    more than one colon, names a missing file, or names missing references.
    """
    entries = []
    for spec in files_refs:
        if ':' not in spec:
            path, refs = spec, None
        else:
            try:
                path, refs = spec.split(':')
            except ValueError:
                return error("Only one colon ':' allowed in file-refs specification.")
        if not os.path.isfile(path):
            return error("Output '{}' set to a missing file: '{}'.".format(key, path))
        entry = {'file': path}
        if refs:
            ref_list = [ref.strip() for ref in refs.split(',')]
            missing = [ref for ref in ref_list if not (os.path.isfile(ref) or os.path.isdir(ref))]
            if missing:
                return error("Output '{}' set to missing references: '{}'.".format(key, ', '.join(missing)))
            entry['refs'] = ref_list
        entries.append(entry)
    return json.dumps({key: entries})
|
def postinit(self, target=None, value=None):
    """Do some setup after initialisation.

    :param target: What is being assigned to.
    :type target: NodeNG or None
    :param value: The value being assigned to the variable.
    :type value: NodeNG or None
    """
    self.target, self.value = target, value
|
def setup_job(manager, job_id, tool_id, tool_version, use_metadata=False):
    """Set up a new job from these inputs and return a dict summarizing its
    state (used to configure the command line)."""
    # The manager may remap the requested id; use its answer from here on.
    job_id = manager.setup_job(job_id, tool_id, tool_version)
    if use_metadata:
        manager.enable_metadata_directory(job_id)
    return build_job_config(
        job_id=job_id,
        job_directory=manager.job_directory(job_id),
        system_properties=manager.system_properties(),
        tool_id=tool_id,
        tool_version=tool_version,
    )
|
def p_expr_XOR_expr(p):
    """expr : expr XOR expr"""
    # NOTE: the docstring above is the PLY grammar rule — it must not change.
    # Logical XOR: true when exactly one operand is truthy.
    exclusive_or = lambda x, y: (x and not y) or (not x and y)
    p[0] = make_binary(p.lineno(2), 'XOR', p[1], p[3], exclusive_or)
|
def _parse ( self , msg ) :
"""Parses a Scratch message and returns a tuple with the first element
as the message type , and the second element as the message payload . The
payload for a ' broadcast ' message is a string , and the payload for a
' sensor - update ' message is a dict whose keys are variables , and values
are updated variable values . Returns None if msg is not a message ."""
|
if not self . _is_msg ( msg ) :
return None
msg_type = msg [ self . prefix_len : ] . split ( ' ' ) [ 0 ]
if msg_type == 'broadcast' :
return ( 'broadcast' , self . _parse_broadcast ( msg ) )
else :
return ( 'sensor-update' , self . _parse_sensorupdate ( msg ) )
|
def bishop88(diode_voltage, photocurrent, saturation_current,
             resistance_series, resistance_shunt, nNsVth, d2mutau=0,
             NsVbi=np.inf, gradients=False):
    """Explicit calculation of points on the IV curve described by the single
    diode equation [1]_.

    .. warning::
       * Do not use ``d2mutau`` with CEC coefficients.
       * Usage of ``d2mutau`` with PVSyst coefficients is required for
         cadmium-telluride (CdTe) and amorphous-silicon (a:Si) PV modules
         only.

    Parameters
    ----------
    diode_voltage : numeric
        diode voltages [V]
    photocurrent : numeric
        photo-generated current [A]
    saturation_current : numeric
        diode reverse saturation current [A]
    resistance_series : numeric
        series resistance [ohms]
    resistance_shunt : numeric
        shunt resistance [ohms]
    nNsVth : numeric
        product of thermal voltage ``Vth`` [V], diode ideality factor ``n``,
        and number of series cells ``Ns``
    d2mutau : numeric, default 0
        PVSyst thin-film recombination parameter that is the ratio of
        thickness of the intrinsic layer squared :math:`d^2` and the
        diffusion length of charge carriers :math:`\\mu \\tau`, in volts [V]
    NsVbi : numeric, default ``np.inf``
        PVSyst thin-film recombination parameter that is the product of the
        PV module number of series cells ``Ns`` and the builtin voltage
        ``Vbi`` of the intrinsic layer, in volts [V]
    gradients : bool
        False returns only I, V, and P. True also returns gradients

    Returns
    -------
    tuple
        currents [A], voltages [V], power [W], and optionally
        :math:`\\frac{dI}{dV_d}`, :math:`\\frac{dV}{dV_d}`,
        :math:`\\frac{dI}{dV}`, :math:`\\frac{dP}{dV}`, and
        :math:`\\frac{d^2 P}{dV dV_d}`

    Notes
    -----
    The PVSyst thin-film recombination loss parameters ``d2mutau`` and
    ``NsVbi`` are only applied to CdTe and a:Si PV modules [2]_, [3]_.  The
    builtin voltage :math:`V_{bi}` should account for all junctions; e.g.
    tandem and triple junction cells would have builtin voltages of 1.8 [V]
    and 2.7 [V] respectively, based on the default of 0.9 [V] for a single
    junction.  ``NsVbi`` should only account for the number of series cells
    in a single parallel sub-string if the module has cells in parallel
    greater than 1.

    The ``NsVbi`` default was changed from ``np.Inf`` to ``np.inf``: the
    capitalized alias was removed in NumPy 2.0 (the value is identical).

    References
    ----------
    .. [1] JW Bishop, Solar Cells (1988) :doi:`10.1016/0379-6787(88)90059-2`
    .. [2] J. Mertens et al., IEEE Trans. Electron Devices, 45(2), 1998.
       :doi:`10.1109/16.658676`
    .. [3] A. Mermoud and T. Lejeune, 25th EUPVSEC, 2010.
       :doi:`10.4229/25thEUPVSEC2010-4BV.1.114`
    """
    # calculate recombination loss current where d2mutau > 0
    is_recomb = d2mutau > 0  # True where there is thin-film recombination loss
    v_recomb = np.where(is_recomb, NsVbi - diode_voltage, np.inf)
    i_recomb = np.where(is_recomb, photocurrent * d2mutau / v_recomb, 0)
    # calculate temporary values to simplify calculations
    v_star = diode_voltage / nNsVth  # non-dimensional diode voltage
    g_sh = 1.0 / resistance_shunt  # conductance
    i = (photocurrent - saturation_current * np.expm1(v_star)
         - diode_voltage * g_sh - i_recomb)
    v = diode_voltage - i * resistance_series
    retval = (i, v, i * v)
    if gradients:
        # calculate recombination loss current gradients where d2mutau > 0
        grad_i_recomb = np.where(is_recomb, i_recomb / v_recomb, 0)
        grad_2i_recomb = np.where(is_recomb, 2 * grad_i_recomb / v_recomb, 0)
        g_diode = saturation_current * np.exp(v_star) / nNsVth  # conductance
        grad_i = -g_diode - g_sh - grad_i_recomb  # di/dvd
        grad_v = 1.0 - grad_i * resistance_series  # dv/dvd
        # dp/dv = d(iv)/dv = v * di/dv + i
        grad = grad_i / grad_v  # di/dv
        grad_p = v * grad + i  # dp/dv
        grad2i = -g_diode / nNsVth - grad_2i_recomb  # d2i/dvd
        grad2v = -grad2i * resistance_series  # d2v/dvd
        grad2p = (grad_v * grad
                  + v * (grad2i / grad_v - grad_i * grad2v / grad_v ** 2)
                  + grad_i)  # d2p/dv/dvd
        retval += (grad_i, grad_v, grad, grad_p, grad2p)
    return retval
|
def send_KeyEvent(self, key, down):
    """Send a KeyEvent message for the given keysym.

    For most ordinary keys, the "keysym" is the same as the corresponding
    ASCII value.  Other common keys are shown in the KEY_ constants.
    """
    message = struct.pack('!BBxxI', 4, down, key)
    self.sendMessage(message)
|
def is_in_scope(cls, want_scope: str, have_scopes: List[Scope]) -> bool:
    """Return True if the wanted scope is in the list of scopes or is
    implied by a derived scope.

    :param want_scope: scope wanted for permission to do something (str
        because could be invalid scope)
    :param have_scopes: list of valid scopes that user has been assigned
    """
    # An empty/None requirement is always satisfied.
    if not want_scope:
        return True
    # Exact match, or a match on the category prefix (text before ':').
    if want_scope in have_scopes or want_scope.split(':')[0] in have_scopes:
        return True
    # Escalation chain: read is implied by write, write is implied by admin.
    if want_scope.startswith('read'):
        return cls.is_in_scope(want_scope.replace('read', 'write'), have_scopes)
    if want_scope.startswith('write'):
        return cls.is_in_scope(want_scope.replace('write', 'admin'), have_scopes)
    return False
|
def get_host(self, name, default=NoHostError):
    '''Get a single host by name.'''
    # NoHostError doubles as a sentinel: when no default is supplied,
    # a missing host raises instead of returning a fallback.
    hosts = self.hosts
    if name not in hosts:
        if default is not NoHostError:
            return default
        raise NoHostError('No such host: {0}'.format(name))
    return hosts[name]
|
def redact_image(self, parent, inspect_config=None, image_redaction_configs=None, include_findings=None, byte_item=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None,):
    """Redacts potentially sensitive info from an image.

    This method has limits on input size, processing time, and output size.
    See https://cloud.google.com/dlp/docs/redacting-sensitive-data-images
    to learn more.

    When no InfoTypes or CustomInfoTypes are specified in this request, the
    system will automatically choose what detectors to run. By default this
    may be all types, but may change over time as detectors are updated.

    Example:
        >>> from google.cloud import dlp_v2
        >>> client = dlp_v2.DlpServiceClient()
        >>> parent = client.project_path('[PROJECT]')
        >>> response = client.redact_image(parent)

    Args:
        parent (str): The parent resource name, for example
            projects/my-project-id.
        inspect_config (Union[dict, ~google.cloud.dlp_v2.types.InspectConfig]):
            Configuration for the inspector.
        image_redaction_configs (list[Union[dict, ~google.cloud.dlp_v2.types.ImageRedactionConfig]]):
            The configuration for specifying what content to redact from
            images.
        include_findings (bool): Whether the response should include findings
            along with the redacted image.
        byte_item (Union[dict, ~google.cloud.dlp_v2.types.ByteContentItem]):
            The content must be PNG, JPEG, SVG or BMP.
        retry (Optional[google.api_core.retry.Retry]): A retry object used to
            retry requests. If ``None`` is specified, requests will not be
            retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait
            for the request to complete. Note that if ``retry`` is specified,
            the timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Returns:
        A :class:`~google.cloud.dlp_v2.types.RedactImageResponse` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request failed
            for any reason.
        google.api_core.exceptions.RetryError: If the request failed due to a
            retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Lazily wrap the transport method with retry/timeout logic on first use.
    if "redact_image" not in self._inner_api_calls:
        self._inner_api_calls["redact_image"] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.redact_image,
            default_retry=self._method_configs["RedactImage"].retry,
            default_timeout=self._method_configs["RedactImage"].timeout,
            client_info=self._client_info,
        )
    request = dlp_pb2.RedactImageRequest(
        parent=parent,
        inspect_config=inspect_config,
        image_redaction_configs=image_redaction_configs,
        include_findings=include_findings,
        byte_item=byte_item,
    )
    # Copy the caller's metadata so we can append routing info safely.
    metadata = list(metadata) if metadata is not None else []
    # NOTE(review): this try/except mirrors generated GAPIC boilerplate;
    # building the list cannot actually raise AttributeError.
    try:
        routing_header = [("parent", parent)]
    except AttributeError:
        pass
    else:
        routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(routing_header)
        metadata.append(routing_metadata)
    return self._inner_api_calls["redact_image"](request, retry=retry, timeout=timeout, metadata=metadata)
|
def close(self):
    '''Closes connection with the q service.'''
    if not self._connection:
        return
    # Close the buffered file wrapper before the underlying connection.
    self._connection_file.close()
    self._connection_file = None
    self._connection.close()
    self._connection = None
|
def pvRvz(self, vR, vz, R, z, gl=True, ngl=_DEFAULTNGL2, vTmax=1.5):
    """NAME:
       pvRvz
    PURPOSE:
       calculate the marginalized (vR, vz) probability at this location
       (NOT normalized by the density)
    INPUT:
       vR - radial velocity (can be Quantity)
       vz - vertical velocity (can be Quantity)
       R - radius (can be Quantity)
       z - height (can be Quantity)
       gl - use Gauss-Legendre integration (True, currently the only option;
            the function returns None implicitly when gl is False)
       ngl - order of Gauss-Legendre integration (must be even)
       vTmax - sets integration limits to [0, vTmax] for integration over vT
               (default: 1.5)
    OUTPUT:
       p(vR, vz, R, z)
    HISTORY:
       2013-01-02 - Written - Bovy (IAS)
       2018-01-12 - Added Gauss-Legendre integration prefactor vTmax/2 -
                    Trick (MPA)
    """
    if gl:
        if ngl % 2 == 1:
            raise ValueError("ngl must be even")
        # Use Gauss-Legendre integration for all; reuse cached nodes/weights
        # for the two default orders, otherwise compute them on the fly.
        if ngl == _DEFAULTNGL:
            glx, glw = self._glxdef, self._glwdef
            glx12, glw12 = self._glxdef12, self._glwdef12
        elif ngl == _DEFAULTNGL2:
            glx, glw = self._glxdef2, self._glwdef2
            glx12, glw12 = self._glxdef, self._glwdef
        else:
            glx, glw = numpy.polynomial.legendre.leggauss(ngl)
            glx12, glw12 = numpy.polynomial.legendre.leggauss(ngl // 2)
        # Evaluate everywhere: map GL abscissae from [-1, 1] onto [0, vTmax]
        vTgl = vTmax / 2. * (glx + 1.)
        vTglw = glw
        vTfac = 0.5 * vTmax
        # integration over [0., vTmax]
        # If inputs are arrays, tile so each (R, z, vR, vz) pairs with every
        # vT node; scalars are broadcast to length-ngl arrays instead.
        if isinstance(R, numpy.ndarray):
            nR = len(R)
            R = numpy.tile(R, (ngl, 1)).T.flatten()
            z = numpy.tile(z, (ngl, 1)).T.flatten()
            vR = numpy.tile(vR, (ngl, 1)).T.flatten()
            vz = numpy.tile(vz, (ngl, 1)).T.flatten()
            vTgl = numpy.tile(vTgl, (nR, 1)).flatten()
            vTglw = numpy.tile(vTglw, (nR, 1))
            scalarOut = False
        else:
            R = R + numpy.zeros(ngl)
            vR = vR + numpy.zeros(ngl)
            z = z + numpy.zeros(ngl)
            vz = vz + numpy.zeros(ngl)
            nR = 1
            scalarOut = True
        # evaluate the DF at all (position, vT-node) combinations and sum the
        # weighted integrand along the vT axis (GL quadrature, prefactor vTfac)
        logqeval = numpy.reshape(self(R, vR, vTgl, z, vz, log=True, use_physical=False), (nR, ngl))
        out = numpy.sum(numpy.exp(logqeval) * vTglw * vTfac, axis=1)
        if scalarOut:
            return out[0]
        else:
            return out
|
def apply_func_to_select_indices_along_full_axis(self, axis, func, indices, keep_remaining=False):
    """Applies a function to a select subset of full columns/rows.

    Note: This should be used when you need to apply a function that relies
    on some global information for the entire column/row, but only need
    to apply a function to a subset.

    Important: For your func to operate directly on the indices provided,
    it must use `internal_indices` as a keyword argument.

    Args:
        axis: The axis to apply the function over (0 - rows, 1 - columns)
        func: The function to apply.
        indices: The global indices to apply the func to. May also be a dict
            mapping each index to per-index data; in that case `func` must
            accept a `func_dict` keyword argument.
        keep_remaining: Whether or not to keep the other partitions.
            Some operations may want to drop the remaining partitions and
            keep only the results.

    Returns:
        A new BaseFrameManager object, the type of object that called this.
    """
    # Empty frame: nothing to apply to.
    if self.partitions.size == 0:
        return self.__constructor__(np.array([[]]))
    # A dict of indices carries per-index data for `func` (see docstring).
    if isinstance(indices, dict):
        dict_indices = indices
        indices = list(indices.keys())
    else:
        dict_indices = None
    if not isinstance(indices, list):
        indices = [indices]
    # Map block index -> internal indices within that block.
    partitions_dict = self._get_dict_of_block_index(axis, indices)
    preprocessed_func = self.preprocess_func(func)
    # Since we might be keeping the remaining blocks that are not modified,
    # we have to also keep the block_partitions object in the correct
    # direction (transpose for columns).
    if not axis:
        partitions_for_apply = self.column_partitions
        partitions_for_remaining = self.partitions.T
    else:
        partitions_for_apply = self.row_partitions
        partitions_for_remaining = self.partitions
    # We may have a command to perform different functions on different
    # columns at the same time. We attempt to handle this as efficiently as
    # possible here. Functions that use this in the dictionary format must
    # accept a keyword argument `func_dict`.
    if dict_indices is not None:
        if not keep_remaining:
            # Apply only to the touched blocks, dropping the rest.
            result = np.array([partitions_for_apply[i].apply(preprocessed_func, func_dict={idx: dict_indices[idx] for idx in partitions_dict[i]},) for i in partitions_dict])
        else:
            # Keep untouched blocks as-is; apply only where indices fall.
            result = np.array([partitions_for_remaining[i] if i not in partitions_dict else self._apply_func_to_list_of_partitions(preprocessed_func, partitions_for_apply[i], func_dict={idx: dict_indices[idx] for idx in partitions_dict[i]},) for i in range(len(partitions_for_apply))])
    else:
        if not keep_remaining:
            # See notes in `apply_func_to_select_indices`
            result = np.array([partitions_for_apply[i].apply(preprocessed_func, internal_indices=partitions_dict[i]) for i in partitions_dict])
        else:
            # See notes in `apply_func_to_select_indices`
            result = np.array([partitions_for_remaining[i] if i not in partitions_dict else partitions_for_apply[i].apply(preprocessed_func, internal_indices=partitions_dict[i]) for i in range(len(partitions_for_remaining))])
    # Transpose back for the row-axis case to restore block orientation.
    return (self.__constructor__(result.T) if not axis else self.__constructor__(result))
|
def from_las(cls, fname, remap=None, funcs=None, data=True, req=None,
             alias=None, encoding=None, printfname=False):
    """Constructor. Essentially just wraps ``from_lasio()``, but is more
    convenient for most purposes.

    Args:
        fname (str): The path of the LAS file, or a URL to one.
        remap (dict): Optional. A dict of 'old': 'new' LAS field names.
        funcs (dict): Optional. A dict of 'las field': function() for
            implementing a transform before loading. Can be a lambda.
        data (bool): Forwarded to ``from_lasio()``.
        req: Optional. Forwarded to ``from_lasio()``.
        alias (dict): Optional. Forwarded to ``from_lasio()``.
        encoding (str): Optional. Text encoding passed to ``lasio.read()``.
        printfname (bool): prints filename before trying to load it, for
            debugging.

    Returns:
        well. The well object.

    Raises:
        WellError: if an http(s) URL could not be retrieved.
    """
    if printfname:
        print(fname)
    if re.match(r'https?://.+\..+/.+?', fname) is not None:
        try:
            # BUG FIX: keep the downloaded text in a local variable —
            # previously it overwrote the `data` parameter, which is then
            # forwarded to from_lasio() as a flag.
            url_text = urllib.request.urlopen(fname).read().decode()
        # BUG FIX: HTTPError lives in urllib.error, not urllib; catching
        # urllib.HTTPError raised AttributeError instead of WellError.
        except urllib.error.HTTPError as e:
            raise WellError('Could not retrieve url: ', e)
        fname = (StringIO(url_text))
    las = lasio.read(fname, encoding=encoding)
    # Pass to other constructor.
    return cls.from_lasio(las, remap=remap, funcs=funcs, data=data,
                          req=req, alias=alias, fname=fname)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.