signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def double(self):
    """Return a new point that is twice this one (X9.62 B.3 point doubling)."""
    # The point at infinity doubles to itself.
    if self == INFINITY:
        return INFINITY
    p = self.__curve.p()
    a = self.__curve.a()
    # Slope of the tangent line at this point (X9.62 B.3).
    slope = ((3 * self.__x * self.__x + a)
             * numbertheory.inverse_mod(2 * self.__y, p)) % p
    x3 = (slope * slope - 2 * self.__x) % p
    y3 = (slope * (self.__x - x3) - self.__y) % p
    return Point(self.__curve, x3, y3)
|
def is_gauge(md_type):
    """Whether a given MetricDescriptorType value is a gauge.

    :type md_type: int
    :param md_type: A MetricDescriptorType enum value.

    :rtype: bool
    :return: True iff ``md_type`` is one of the GAUGE_* types.

    :raises ValueError: if ``md_type`` is not a valid MetricDescriptorType.
    """
    if md_type not in metric_descriptor.MetricDescriptorType:
        # Previously raised a bare ValueError with no message.
        raise ValueError(
            'Invalid MetricDescriptorType: {}'.format(md_type))  # pragma: NO COVER
    return md_type in {
        metric_descriptor.MetricDescriptorType.GAUGE_INT64,
        metric_descriptor.MetricDescriptorType.GAUGE_DOUBLE,
        metric_descriptor.MetricDescriptorType.GAUGE_DISTRIBUTION,
    }
|
def _send_breakpoint_condition_exception(self, thread, conditional_breakpoint_exception_tuple):
    """If a conditional breakpoint raises an exception during evaluation,
    send the exception details to the Java side."""
    thread_id = get_thread_id(thread)
    # The tuple is expected to hold exactly (exception_type, stacktrace);
    # anything else is silently ignored, as before.
    if not conditional_breakpoint_exception_tuple:
        return
    if len(conditional_breakpoint_exception_tuple) != 2:
        return
    exc_type, stacktrace = conditional_breakpoint_exception_tuple
    cmd = InternalGetBreakpointException(thread_id, exc_type, stacktrace)
    self.post_internal_command(cmd, thread_id)
|
def console_width(kwargs):
    """Determine the console width.

    An explicit ``width`` entry in *kwargs* wins; otherwise the width is
    detected from the terminal, falling back to 80 columns when detection
    fails (e.g. no attached terminal).

    :param kwargs: keyword-argument dict that may contain ``width``.
    :return: console width in characters.
    """
    # An explicit width short-circuits platform detection entirely
    # (the original ran detection even when the result was discarded).
    width = kwargs.get('width', None)
    if width:
        return width
    if sys.platform.startswith('win'):
        width = _find_windows_console_width()
    else:
        width = _find_unix_console_width()
    # Detection may return a falsy value; default to the classic 80 columns.
    return width or 80
|
def const_return(func):
    """Decorator: compute ``func`` on the first call, then return that cached
    result for every subsequent call, ignoring the new arguments.

    >>> @const_return
    ... def f(x):
    ...     return x
    >>> r1 = f(1)
    >>> assert r1 == 1 and r1 is f(2)
    """
    # Fresh per-decoration sentinel: unlike the shared module-level `_undef`,
    # it can never collide with a value `func` might legitimately return.
    missing = object()
    result = missing

    def ret_call(*args, **kwargs):
        nonlocal result
        if result is missing:
            result = func(*args, **kwargs)
        return result

    return ret_call
|
def check_sockets(self):
    '''Check for new messages on sockets and respond accordingly.

    .. versionchanged:: 0.11.3
        Update routes table by setting ``df_routes`` property of
        :attr:`parent.canvas_slave`.
    .. versionchanged:: 0.12
        Update ``dynamic_electrode_state_shapes`` layer of
        :attr:`parent.canvas_slave` when dynamic electrode actuation states
        change.
    .. versionchanged:: 0.13
        Update local global, electrode, and route command lists in response
        to ``microdrop.command_plugin`` messages.
    '''
    # Poll the command socket without blocking; zmq.Again just means no
    # message is waiting.
    try:
        msg_frames = (self.command_socket.recv_multipart(zmq.NOBLOCK))
    except zmq.Again:
        pass
    else:
        self.on_command_recv(msg_frames)
    # Poll the subscription socket and dispatch on (source, msg_type).
    try:
        msg_frames = (self.subscribe_socket.recv_multipart(zmq.NOBLOCK))
        source, target, msg_type, msg_json = msg_frames
        if ((source == 'microdrop.device_info_plugin') and (msg_type == 'execute_reply')):
            # Device-info replies: forward a freshly loaded device to parent.
            msg = json.loads(msg_json)
            if msg['content']['command'] == 'get_device':
                data = decode_content_data(msg)
                if data is not None:
                    self.parent.on_device_loaded(data)
        elif ((source == 'microdrop.electrode_controller_plugin') and (msg_type == 'execute_reply')):
            msg = json.loads(msg_json)
            if msg['content']['command'] in ('set_electrode_state', 'set_electrode_states'):
                data = decode_content_data(msg)
                if data is None:
                    print msg
                else:  # self.emit('electrode-states-updated', data)
                    self.parent.on_electrode_states_updated(data)
            elif msg['content']['command'] == 'get_channel_states':
                data = decode_content_data(msg)
                if data is None:
                    print msg
                else:  # self.emit('electrode-states-set', data)
                    self.parent.on_electrode_states_set(data)
        elif ((source == 'droplet_planning_plugin') and (msg_type == 'execute_reply')):
            msg = json.loads(msg_json)
            if msg['content']['command'] in ('add_route', ):
                # A route was added; request the refreshed routes table.
                self.execute_async('droplet_planning_plugin', 'get_routes')
            elif msg['content']['command'] in ('get_routes', ):
                data = decode_content_data(msg)
                self.parent.canvas_slave.df_routes = data
        elif ((source == 'microdrop.command_plugin') and (msg_type == 'execute_reply')):
            msg = json.loads(msg_json)
            if msg['content']['command'] in ('get_commands', 'unregister_command', 'register_command'):
                # Re-register commands grouped by namespace on the canvas
                # slave; namespaces without a register_*_command handler
                # are skipped.
                df_commands = decode_content_data(msg).set_index('namespace')
                for group_i, df_i in df_commands.groupby('namespace'):
                    register = getattr(self.parent.canvas_slave, 'register_%s_command' % group_i, None)
                    if register is None:
                        continue
                    else:
                        for j, command_ij in df_i.iterrows():
                            register(command_ij.command_name, title=command_ij.title, group=command_ij.plugin_name)
                            _L().debug('registered %s command: `%s`', group_i, command_ij)
        else:
            # Unrecognized source/type: keep the raw payload for inspection.
            self.most_recent = msg_json
    except zmq.Again:
        pass
    except:
        # Best-effort handler: log any unexpected error but keep polling.
        logger.error('Error processing message from subscription ' 'socket.', exc_info=True)
    return True
|
def load_directory(self, top_path, followlinks):
    """Traverse ``top_path`` and save patterns from any .ddsignore files found.

    :param top_path: str: directory name we should traverse looking for
        ignore files
    :param followlinks: boolean: should we traverse symbolic links
    """
    for dir_name, _child_dirs, child_files in os.walk(top_path, followlinks=followlinks):
        # File names within one directory listing are unique, so a simple
        # membership test is equivalent to scanning for an equal name.
        if DDS_IGNORE_FILENAME in child_files:
            patterns = self._read_non_empty_lines(dir_name, DDS_IGNORE_FILENAME)
            self.add_patterns(dir_name, patterns)
|
def analysis_provenance_details_extractor(impact_report, component_metadata):
    """Extract provenance details of layers.

    This extractor is the main provenance-details extractor, producing the
    tree-view provenance details.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that the extractor needs
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.1
    """
    context = {}
    extra_args = component_metadata.extra_args
    provenance_format_args = resolve_from_dictionary(
        extra_args, 'provenance_format')
    # Canonical display order for keywords in each provenance section.
    keywords_order = [
        'title',
        'source',
        'layer_purpose',
        'layer_geometry',
        'hazard',
        'exposure',
        'hazard_category',
        'exposure_unit',
        'value_map',
        'value_maps',
        'inasafe_fields',
        'inasafe_default_values',
        'layer_mode',
        'hazard_layer',
        'exposure_layer',
        'aggregation_layer',
        'keyword_version',
        'classification',
    ]
    use_rounding = impact_report.impact_function.use_rounding
    debug_mode = impact_report.impact_function.debug_mode
    # we define dict here to create a different object of keyword
    hazard_keywords = dict(
        impact_report.impact_function.provenance['hazard_keywords'])
    # hazard_keywords doesn't have hazard_layer path information
    hazard_layer = QgsDataSourceUri.removePassword(
        decode_full_layer_uri(
            impact_report.impact_function.provenance.get(
                provenance_hazard_layer['provenance_key']))[0])
    hazard_keywords['hazard_layer'] = hazard_layer
    # keep only value maps with IF exposure
    for keyword in ['value_maps', 'thresholds']:
        if hazard_keywords.get(keyword):
            # iterate over a copy so deleting from the original dict is safe
            temp_keyword = dict(hazard_keywords[keyword])
            for key in temp_keyword:
                if key not in impact_report.impact_function.provenance[
                        'exposure_keywords']['exposure']:
                    del hazard_keywords[keyword][key]
    header = resolve_from_dictionary(provenance_format_args, 'hazard_header')
    hazard_provenance = {
        'header': header.title(),
        'provenances': headerize(
            sorted_keywords_by_order(hazard_keywords, keywords_order))
    }
    # convert value if there is dict_keywords
    provenances = hazard_provenance['provenances']
    hazard_provenance['provenances'] = resolve_dict_keywords(provenances)
    # we define dict here to create a different object of keyword
    exposure_keywords = dict(
        impact_report.impact_function.provenance['exposure_keywords'])
    # exposure_keywords doesn't have exposure_layer path information
    exposure_layer = QgsDataSourceUri.removePassword(
        decode_full_layer_uri(
            impact_report.impact_function.provenance.get(
                provenance_exposure_layer['provenance_key']))[0])
    exposure_keywords['exposure_layer'] = exposure_layer
    header = resolve_from_dictionary(
        provenance_format_args, 'exposure_header')
    exposure_provenance = {
        'header': header.title(),
        'provenances': headerize(
            sorted_keywords_by_order(exposure_keywords, keywords_order))
    }
    # convert value if there is dict_keywords
    provenances = exposure_provenance['provenances']
    exposure_provenance['provenances'] = resolve_dict_keywords(provenances)
    # aggregation keywords could be None so we don't define dict here
    aggregation_keywords = impact_report.impact_function.provenance[
        'aggregation_keywords']
    header = resolve_from_dictionary(
        provenance_format_args, 'aggregation_header')
    aggregation_provenance = {
        'header': header.title(),
        'provenances': None
    }
    # only if aggregation layer used
    if aggregation_keywords:
        # we define dict here to create a different object of keyword
        aggregation_keywords = dict(aggregation_keywords)
        # aggregation_keywords doesn't have aggregation_layer path information
        aggregation_layer = QgsDataSourceUri.removePassword(
            decode_full_layer_uri(
                impact_report.impact_function.provenance.get(
                    provenance_aggregation_layer['provenance_key']))[0])
        aggregation_keywords['aggregation_layer'] = aggregation_layer
        aggregation_provenance['provenances'] = headerize(
            sorted_keywords_by_order(aggregation_keywords, keywords_order))
        # convert value if there is dict_keywords
        provenances = aggregation_provenance['provenances']
        aggregation_provenance['provenances'] = resolve_dict_keywords(
            provenances)
    else:
        aggregation_not_used = resolve_from_dictionary(
            extra_args, ['defaults', 'aggregation_not_used'])
        aggregation_provenance['provenances'] = aggregation_not_used
    all_provenance_keywords = dict(impact_report.impact_function.provenance)
    # we add debug mode information to the provenance
    all_provenance_keywords[provenance_use_rounding['provenance_key']] = (
        'On' if use_rounding else 'Off')
    all_provenance_keywords['debug_mode'] = 'On' if debug_mode else 'Off'
    header = resolve_from_dictionary(
        provenance_format_args, 'analysis_environment_header')
    analysis_environment_provenance_items = OrderedDict()
    # Environment keys shown in the "analysis environment" section.
    analysis_environment_provenance_keys = [
        'os',
        'inasafe_version',
        provenance_use_rounding['provenance_key'],
        'debug_mode',
        'qgis_version',
        'qt_version',
        'gdal_version',
        'pyqt_version',
    ]
    for item in analysis_environment_provenance_keys:
        analysis_environment_provenance_items[item] = (
            all_provenance_keywords[item])
    analysis_environment_provenance = {
        'header': header.title(),
        'provenances': headerize(analysis_environment_provenance_items)
    }
    impact_function_name = impact_report.impact_function.name
    header = resolve_from_dictionary(
        provenance_format_args, 'impact_function_header')
    impact_function_provenance = {
        'header': header.title(),
        'provenances': impact_function_name
    }
    # Assemble the ordered tree used by the rendering phase.
    provenance_detail = OrderedDict()
    provenance_detail['impact_function'] = impact_function_provenance
    provenance_detail['hazard'] = hazard_provenance
    provenance_detail['exposure'] = exposure_provenance
    provenance_detail['aggregation'] = aggregation_provenance
    provenance_detail['analysis_environment'] = analysis_environment_provenance
    analysis_details_header = resolve_from_dictionary(
        extra_args, ['header', 'analysis_detail'])
    context['component_key'] = component_metadata.key
    context.update({
        'header': analysis_details_header,
        'details': provenance_detail
    })
    return context
|
def wrap(function, *args, **kwargs):
    '''Wrap a function that returns a request with some exception handling.'''
    try:
        req = function(*args, **kwargs)
        logger.debug('Got %s: %s', req.status_code, req.content)
        if req.status_code != 200:
            # Non-200 responses are surfaced as ClientExceptions.
            raise ClientException(req.reason, req.content)
        return req
    except ClientException:
        # Already our exception type -- propagate untouched.
        raise
    except Exception as exc:
        # Anything else is wrapped so callers only ever see ClientException.
        raise ClientException(exc)
|
def to_rfc3339(timestamp):
    """Convert ``timestamp`` to an RFC 3339 date string.

    ``timestamp`` may be a ``datetime.datetime`` or a ``datetime.timedelta``.
    Instances of the latter are assumed to be a delta from the beginning of
    the Unix epoch, 1st of January 1970.  The returned string is always
    Z-normalized, e.g. '1972-01-01T10:00:20.021Z'.

    Args:
        timestamp (datetime|timedelta): represents the timestamp to convert

    Returns:
        string: timestamp converted to an rfc3339-compliant string as above

    Raises:
        ValueError: if timestamp is not a datetime.datetime or
            datetime.timedelta
    """
    # Normalize datetimes to a delta from the Unix epoch.
    delta = (timestamp - _EPOCH_START
             if isinstance(timestamp, datetime.datetime) else timestamp)
    if isinstance(delta, datetime.timedelta):
        return strict_rfc3339.timestamp_to_rfc3339_utcoffset(delta.total_seconds())
    _logger.error(u'Could not convert %s to a rfc3339 time,', timestamp)
    raise ValueError(u'Invalid timestamp type')
|
def compute_curl(self, vector_field):
    """Approximate the curl of a vector field over the mesh.

    The vector field is point-based; the resulting curl is cell-based.
    The approximation is based on

    .. math::
        n \\cdot curl(F) = \\lim_{A \\to 0} |A|^{-1} <\\int_{dGamma}, F> dr;

    see <https://en.wikipedia.org/wiki/Curl_(mathematics)>.  To approximate
    the integral, one only needs the projection of the vector field onto
    the edges at the edge midpoints.
    """
    # Approximate the field at each edge midpoint by averaging the values
    # at the edge's two endpoints.
    midpoint_field = 0.5 * numpy.sum(vector_field[self.idx_hierarchy], axis=0)
    # Sum of <edge, F> over the three edges of each cell.
    edge_dot_sum = numpy.einsum("ijk, ijk->j", self.half_edge_coords, midpoint_field)
    # Vector orthogonal to each triangle; since ||z|| = 2*|A|, the curl
    #     curl = z/||z|| * edge_dot_sum / |A|
    # can be computed without a sqrt as
    #     curl = z * edge_dot_sum * 0.5 / |A|^2.
    normal = numpy.cross(self.half_edge_coords[0], self.half_edge_coords[1])
    return normal * (0.5 * edge_dot_sum / self.cell_volumes ** 2)[..., None]
|
def main(self, ignored_argv=('', )):
    """Blocking main function for TensorBoard.

    This method is called by `tensorboard.main.run_main`, which is the
    standard entrypoint for the tensorboard command line program. The
    configure() method must be called first.

    Args:
        ignored_argv: Do not pass. Required for Abseil compatibility.

    Returns:
        Process exit code, i.e. 0 if successful or non-zero on failure. In
        practice, an exception will most likely be raised instead of
        returning non-zero.

    :rtype: int
    """
    # Route SIGTERM through our handler so shutdown is orderly.
    self._install_signal_handler(signal.SIGTERM, "SIGTERM")
    if self.flags.inspect:
        # Inspection mode: dump event-file info and exit without serving.
        logger.info('Not bringing up TensorBoard, but inspecting event files.')
        event_file = os.path.expanduser(self.flags.event_file)
        efi.inspect(self.flags.logdir, event_file, self.flags.tag)
        return 0
    if self.flags.version_tb:
        # Version query: print and exit.
        print(version.VERSION)
        return 0
    try:
        server = self._make_server()
        sys.stderr.write('TensorBoard %s at %s (Press CTRL+C to quit)\n' % (version.VERSION, server.get_url()))
        sys.stderr.flush()
        self._register_info(server)
        # Blocks until the process is killed or interrupted.
        server.serve_forever()
        return 0
    except TensorBoardServerException as e:
        logger.error(e.msg)
        sys.stderr.write('ERROR: %s\n' % e.msg)
        sys.stderr.flush()
        return -1
|
def log_predictive_density(self, x_test, y_test, Y_metadata=None):
    """Calculate the log predictive density, adding the Jacobian of the
    warping function.

    .. math:
        p(y_{*}|D) = p(y_{*}|f_{*}) p(f_{*}|\\mu_{*} \\sigma^{2}_{*})

    :param x_test: test locations (x_{*})
    :type x_test: (Nx1) array
    :param y_test: test observations (y_{*})
    :type y_test: (Nx1) array
    :param Y_metadata: metadata associated with the test points
    """
    # Predict the latent function at the test locations.
    mean, variance = self._raw_predict(x_test)
    # Map observations through the warping function before evaluating the
    # likelihood.
    warped_y = self.warping_function.f(y_test)
    density = self.likelihood.log_predictive_density(
        warped_y, mean, variance, Y_metadata=Y_metadata)
    # Add the log-Jacobian of the warping transform.
    return density + np.log(self.warping_function.fgrad_y(y_test))
|
async def restore_storage_configuration(self):
    """Restore the machine's storage configuration to its initial state."""
    # The handler performs the actual API call; cache its response locally.
    handler = self._handler
    self._data = await handler.restore_storage_configuration(system_id=self.system_id)
|
def _convertNonZeroToFailure ( self , res ) :
"utility method to handle the result of getProcessOutputAndValue"
|
( stdout , stderr , code ) = res
if code != 0 :
raise EnvironmentError ( 'command failed with exit code %d: %s' % ( code , stderr ) )
return ( stdout , stderr , code )
|
def add(zpool, *vdevs, **kwargs):
    '''Add the specified vdev's to the given storage pool

    zpool : string
        Name of storage pool
    vdevs : string
        One or more devices
    force : boolean
        Forces use of device

    CLI Example:

    .. code-block:: bash

        salt '*' zpool.add myzpool /path/to/vdev1 /path/to/vdev2 [...]
    '''
    ## Configure pool
    # NOTE: '-f' forces use of the devices when requested.
    flags = ['-f'] if kwargs.get('force', False) else []
    # NOTE: the target is the pool name followed by the vdev specifications.
    target = [zpool]
    target.extend(vdevs)
    ## Update storage pool
    res = __salt__['cmd.run_all'](
        __utils__['zfs.zpool_command'](command='add', flags=flags, target=target, ),
        python_shell=False, )
    ret = __utils__['zfs.parse_command_result'](res, 'added')
    if ret['added']:
        ## NOTE: lookup zpool status for vdev config
        ret['vdevs'] = _clean_vdev_config(
            __salt__['zpool.status'](zpool=zpool)[zpool]['config'][zpool], )
    return ret
|
def from_object(self, obj):
    """Update the values from the given object.

    Objects are usually either modules or classes.  Only the uppercase
    variables in that object are stored in the config.

    Example usage::

        from yourapplication import default_config
        app.config.from_object(default_config)

    You should not use this function to load the actual configuration but
    rather configuration defaults.  The actual config should be loaded
    with :meth:`from_pyfile` and ideally from a location not within the
    package because the package might be installed system wide.

    :param obj: an object holding the configuration
    """
    # Only ALL-UPPERCASE attribute names count as configuration keys.
    uppercase_names = (name for name in dir(obj) if name.isupper())
    for name in uppercase_names:
        self[name] = getattr(obj, name)
|
def build(self):
    """Create the appropriate objects from the JSON response."""
    sys_type = self.json['sys']['type']
    if sys_type != 'Array':
        return self._build_single()
    # Sync responses carry pagination/sync URLs; plain arrays do not.
    if any(k in self.json for k in ['nextSyncUrl', 'nextPageUrl']):
        return SyncPage(self.json, default_locale=self.default_locale, localized=True)
    return self._build_array()
|
async def set_discovery_enabled(self):
    """Enable bluetooth discoverability."""
    endpoint = '/setup/bluetooth/discovery'
    url = API.format(ip=self._ipaddress, endpoint=endpoint)
    payload = json.dumps({"enable_discovery": True})
    try:
        # Bound the whole request to 5 seconds.
        async with async_timeout.timeout(5, loop=self._loop):
            response = await self._session.post(url, headers=HEADERS, data=payload)
            _LOGGER.debug(response.status)
    except (asyncio.TimeoutError, aiohttp.ClientError, socket.gaierror) as error:
        # Network problems are logged, not raised.
        _LOGGER.error('Error connecting to %s - %s', self._ipaddress, error)
|
def push(self, targets, jobs=None, remote=None, show_checksums=False):
    """Push data items in a cloud-agnostic way.

    Args:
        targets (list): list of targets to push to the cloud.
        jobs (int): number of jobs that can be running simultaneously.
        remote (dvc.remote.base.RemoteBase): optional remote to push to.
            By default remote from core.remote config option is used.
        show_checksums (bool): show checksums instead of file names in
            information messages.
    """
    # Resolve the remote first, then delegate to the local cache.
    resolved_remote = self._get_cloud(remote, "push")
    return self.repo.cache.local.push(
        targets,
        jobs=jobs,
        remote=resolved_remote,
        show_checksums=show_checksums,
    )
|
def wait_for_logs_matching(container, matcher, timeout=10, encoding='utf-8', **logs_kwargs):
    """Wait for matching log line(s) from the given container by streaming
    the container's stdout and/or stderr outputs.

    Each log line is decoded and any trailing whitespace is stripped before
    the line is matched.

    :param ~docker.models.containers.Container container:
        Container whose log lines to wait for.
    :param matcher:
        Callable that returns True once it has matched a decoded log line(s).
    :param timeout:
        Timeout value in seconds.
    :param encoding:
        Encoding to use when decoding container output to strings.
    :param logs_kwargs:
        Additional keyword arguments to pass to ``container.logs()``. For
        example, the ``stdout`` and ``stderr`` boolean arguments can be used
        to determine whether to stream stdout or stderr or both (the
        default).
    :returns:
        The final matching log line.
    :raises TimeoutError:
        When the timeout value is reached before matching log lines have
        been found.
    :raises RuntimeError:
        When all log lines have been consumed but matching log lines have
        not been found (the container must have stopped for its stream to
        have ended without error).
    """
    try:
        for raw_line in stream_logs(container, timeout=timeout, **logs_kwargs):
            # Decode, then drop the trailing newline/whitespace before matching.
            line = raw_line.decode(encoding).rstrip()
            if matcher(line):
                return line
    except TimeoutError:
        # Re-raise with context: what we waited for plus the last output seen.
        raise TimeoutError('\n'.join([
            ('Timeout ({}s) waiting for logs matching {}.'.format(timeout, matcher)),
            'Last few log lines:',
            _last_few_log_lines(container),
        ]))
    # Stream ended without a match: the container must have stopped.
    raise RuntimeError('\n'.join([
        'Logs matching {} not found.'.format(matcher),
        'Last few log lines:',
        _last_few_log_lines(container),
    ]))
|
def delete_all_possible_task_files(self, courseid, taskid):
    """Delete all possible task files in the directory, to allow changing
    the task format.

    :param courseid: course identifier (validated with id_checker)
    :param taskid: task identifier (validated with id_checker)
    :raises InvalidNameException: if either identifier is invalid
    """
    if not id_checker(courseid):
        raise InvalidNameException("Course with invalid name: " + courseid)
    if not id_checker(taskid):
        raise InvalidNameException("Task with invalid name: " + taskid)
    task_fs = self.get_task_fs(courseid, taskid)
    for ext in self.get_available_task_file_extensions():
        try:
            task_fs.delete("task." + ext)
        except Exception:
            # Best-effort cleanup: a file for this extension may simply not
            # exist.  Narrowed from a bare `except:` so KeyboardInterrupt
            # and SystemExit are no longer swallowed.
            pass
|
def return_an_error(*args):
    '''Build a JSON-API style error document.

    Puts all given errors into a list under the ``errors`` key.
    ref: http://jsonapi.org/format/#errors

    Args:
        *args: A tuple containing errors

    Returns:
        A dictionary containing a list of errors
    '''
    return {'errors': list(args)}
|
def is_horz_aligned(c):
    """Return True if all the components of c are horizontally aligned.

    Horizontal alignment means that the bounding boxes of each Mention of c
    share a similar y-axis value in the visual rendering of the document.

    :param c: The candidate to evaluate
    :rtype: boolean
    """
    results = []
    for i in range(len(c)):
        span_i = _to_span(c[i])
        # Each span must be visually rendered AND horizontally aligned with
        # the first component's bounding box.
        aligned = span_i.sentence.is_visual() and bbox_horz_aligned(
            bbox_from_span(span_i), bbox_from_span(_to_span(c[0])))
        results.append(aligned)
    return all(results)
|
def listen_error_messages_raylet(worker, task_error_queue, threads_stopped):
    """Listen to error messages in the background on the driver.

    This runs in a separate thread on the driver and pushes (error, time)
    tuples to the output queue.

    Args:
        worker: The worker class that this thread belongs to.
        task_error_queue (queue.Queue): A queue used to communicate with the
            thread that prints the errors found by this thread.
        threads_stopped (threading.Event): A threading event used to signal
            to the thread that it should exit.
    """
    worker.error_message_pubsub_client = worker.redis_client.pubsub(ignore_subscribe_messages=True)
    # Exports that are published after the call to
    # error_message_pubsub_client.subscribe and before the call to
    # error_message_pubsub_client.listen will still be processed in the loop.
    # Really we should just subscribe to the errors for this specific job.
    # However, currently all errors seem to be published on the same channel.
    error_pubsub_channel = str(ray.gcs_utils.TablePubsub.ERROR_INFO).encode("ascii")
    worker.error_message_pubsub_client.subscribe(error_pubsub_channel)
    # worker.error_message_pubsub_client.psubscribe("*")
    try:
        # Get the exports that occurred before the call to subscribe.
        error_messages = global_state.error_messages(worker.task_driver_id)
        for error_message in error_messages:
            logger.error(error_message)
        while True:
            # Exit if we received a signal that we should stop.
            if threads_stopped.is_set():
                return
            msg = worker.error_message_pubsub_client.get_message()
            if msg is None:
                # No message waiting; sleep briefly but stay responsive to
                # the stop event.
                threads_stopped.wait(timeout=0.01)
                continue
            gcs_entry = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(msg["data"], 0)
            assert gcs_entry.EntriesLength() == 1
            error_data = ray.gcs_utils.ErrorTableData.GetRootAsErrorTableData(gcs_entry.Entries(0), 0)
            driver_id = error_data.DriverId()
            # Only surface errors for this driver (or driver-agnostic ones).
            if driver_id not in [worker.task_driver_id.binary(), DriverID.nil().binary()]:
                continue
            error_message = ray.utils.decode(error_data.ErrorMessage())
            if (ray.utils.decode(error_data.Type()) == ray_constants.TASK_PUSH_ERROR):
                # Delay it a bit to see if we can suppress it
                task_error_queue.put((error_message, time.time()))
            else:
                logger.error(error_message)
    finally:
        # Close the pubsub client to avoid leaking file descriptors.
        worker.error_message_pubsub_client.close()
|
def request_spot_instances(self, price, image_id, count=1, type='one-time',
                           valid_from=None, valid_until=None,
                           launch_group=None, availability_zone_group=None,
                           key_name=None, security_groups=None,
                           user_data=None, addressing_type=None,
                           instance_type='m1.small', placement=None,
                           kernel_id=None, ramdisk_id=None,
                           monitoring_enabled=False, subnet_id=None,
                           block_device_map=None):
    """Request instances on the spot market at a particular price.

    :type price: str
    :param price: The maximum price of your bid

    :type image_id: string
    :param image_id: The ID of the image to run

    :type count: int
    :param count: The number of instances requested

    :type type: str
    :param type: Type of request. Can be 'one-time' or 'persistent'.
        Default is one-time.

    :type valid_from: str
    :param valid_from: Start date of the request. An ISO8601 time string.

    :type valid_until: str
    :param valid_until: End date of the request. An ISO8601 time string.

    :type launch_group: str
    :param launch_group: If supplied, all requests will be fulfilled
        as a group.

    :type availability_zone_group: str
    :param availability_zone_group: If supplied, all requests will be
        fulfilled within a single availability zone.

    :type key_name: string
    :param key_name: The name of the key pair with which to launch
        instances

    :type security_groups: list of strings
    :param security_groups: The names of the security groups with which to
        associate instances

    :type user_data: string
    :param user_data: The user data passed to the launched instances

    :type instance_type: string
    :param instance_type: The type of instance to run:

        * m1.small
        * m1.large
        * m1.xlarge
        * c1.medium
        * c1.xlarge
        * m2.xlarge
        * m2.2xlarge
        * m2.4xlarge
        * cc1.4xlarge
        * t1.micro

    :type placement: string
    :param placement: The availability zone in which to launch the
        instances

    :type kernel_id: string
    :param kernel_id: The ID of the kernel with which to launch the
        instances

    :type ramdisk_id: string
    :param ramdisk_id: The ID of the RAM disk with which to launch the
        instances

    :type monitoring_enabled: bool
    :param monitoring_enabled: Enable CloudWatch monitoring on the
        instance.

    :type subnet_id: string
    :param subnet_id: The subnet ID within which to launch the instances
        for VPC.

    :type block_device_map:
        :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
    :param block_device_map: A BlockDeviceMapping data structure describing
        the EBS volumes associated with the Image.

    :rtype: Reservation
    :return: The :class:`boto.ec2.spotinstancerequest.SpotInstanceRequest`
        associated with the request for machines
    """
    # Required parameters for the RequestSpotInstances API call.
    params = {'LaunchSpecification.ImageId': image_id, 'Type': type, 'SpotPrice': price}
    # Optional parameters are only added to the request when supplied.
    if count:
        params['InstanceCount'] = count
    if valid_from:
        params['ValidFrom'] = valid_from
    if valid_until:
        params['ValidUntil'] = valid_until
    if launch_group:
        params['LaunchGroup'] = launch_group
    if availability_zone_group:
        params['AvailabilityZoneGroup'] = availability_zone_group
    if key_name:
        params['LaunchSpecification.KeyName'] = key_name
    if security_groups:
        # Accept both SecurityGroup objects and plain group-name strings.
        l = []
        for group in security_groups:
            if isinstance(group, SecurityGroup):
                l.append(group.name)
            else:
                l.append(group)
        self.build_list_params(params, l, 'LaunchSpecification.SecurityGroup')
    if user_data:
        # User data must be base64-encoded for the API.
        params['LaunchSpecification.UserData'] = base64.b64encode(user_data)
    if addressing_type:
        params['LaunchSpecification.AddressingType'] = addressing_type
    if instance_type:
        params['LaunchSpecification.InstanceType'] = instance_type
    if placement:
        params['LaunchSpecification.Placement.AvailabilityZone'] = placement
    if kernel_id:
        params['LaunchSpecification.KernelId'] = kernel_id
    if ramdisk_id:
        params['LaunchSpecification.RamdiskId'] = ramdisk_id
    if monitoring_enabled:
        params['LaunchSpecification.Monitoring.Enabled'] = 'true'
    if subnet_id:
        params['LaunchSpecification.SubnetId'] = subnet_id
    if block_device_map:
        block_device_map.build_list_params(params, 'LaunchSpecification.')
    return self.get_list('RequestSpotInstances', params, [('item', SpotInstanceRequest)], verb='POST')
|
def _get_host_switches(self, host_id):
    """Get switch IPs from the configured host mapping.

    This method is used to extract switch information from transactions
    where VNIC_TYPE is normal.  Information is extracted from the ini file
    which is stored in _nexus_switches.

    :param host_id: host_name from transaction
    :returns: list of all switches
    :returns: list of only switches which are active
    """
    all_switches = set()
    active_switches = set()
    try:
        for mapping in nxos_db.get_host_mappings(host_id):
            all_switches.add(mapping.switch_ip)
            if self.is_switch_active(mapping.switch_ip):
                active_switches.add(mapping.switch_ip)
    except excep.NexusHostMappingNotFound:
        # No mapping configured for this host; both lists stay empty.
        pass
    return list(all_switches), list(active_switches)
|
def det_residual(model, guess, start, final, shocks, diff=True, jactype='sparse'):
    '''Computes the residuals, the derivatives of the stacked-time system.

    :param model: an fga model
    :param guess: the guess for the simulated values. An `(n_s.n_x) x N` array,
        where n_s is the number of states,
        n_x the number of controls, and `N` the length of the simulation.
    :param start: initial boundary condition (initial value of the states)
    :param final: final boundary condition (last value of the controls)
    :param shocks: values for the exogenous shocks
    :param diff: if True, the derivatives are computed
    :param jactype: 'sparse' to return the jacobian as a scipy CSC matrix,
        anything else for a dense ndarray
    :return: a list with two elements:
        - an `(n_s.n_x) x N` array with the residuals of the system
        - a `(n_s.n_x) x N x (n_s.n_x) x N` array representing the jacobian of
          the system
    '''
    # TODO: compute a sparse derivative and ensure the solvers can deal with it
    n_s = len(model.symbols['states'])
    n_x = len(model.symbols['controls'])
    # n_e = len(model.symbols['shocks'])
    N = guess.shape[0]
    p = model.calibration['parameters']
    f = model.functions['arbitrage']
    g = model.functions['transition']
    # Split the guess into "current period" (t = 0..N-2) and
    # "next period" (t = 1..N-1) slices.
    vec = guess[:-1, :]
    vec_f = guess[1:, :]
    s = vec[:, :n_s]       # states at t
    x = vec[:, n_s:]       # controls at t
    S = vec_f[:, :n_s]     # states at t+1
    X = vec_f[:, n_s:]     # controls at t+1
    m = shocks[:-1, :]     # shocks at t
    M = shocks[1:, :]      # shocks at t+1
    if diff:
        SS, SS_m, SS_s, SS_x, SS_M = g(m, s, x, M, p, diff=True)
        R, R_m, R_s, R_x, R_M, R_S, R_X = f(m, s, x, M, S, X, p, diff=True)
    else:
        SS = g(m, s, x, M, p)
        R = f(m, s, x, M, S, X, p)
    # Transition residual (next states implied by g minus guessed next states)
    # and arbitrage residual.
    res_s = SS - S
    res_x = R
    res = np.zeros((N, n_s + n_x))
    res[1:, :n_s] = res_s
    res[:-1, n_s:] = res_x
    # Boundary conditions: initial states pinned to `start`, terminal
    # controls pinned to the previous period's controls.
    res[0, :n_s] = -(guess[0, :n_s] - start)
    res[-1, n_s:] = -(guess[-1, n_s:] - guess[-2, n_s:])
    if not diff:
        return res
    else:
        sparse_jac = False
        if not sparse_jac:
            # we compute the derivative matrix
            res_s_s = SS_s
            res_s_x = SS_x
            # next block is probably very inefficient: the full dense
            # N x (n_s+n_x) x N x (n_s+n_x) jacobian is materialized.
            jac = np.zeros((N, n_s + n_x, N, n_s + n_x))
            for i in range(N - 1):
                jac[i, n_s:, i, :n_s] = R_s[i, :, :]
                jac[i, n_s:, i, n_s:] = R_x[i, :, :]
                jac[i, n_s:, i + 1, :n_s] = R_S[i, :, :]
                jac[i, n_s:, i + 1, n_s:] = R_X[i, :, :]
                jac[i + 1, :n_s, i, :n_s] = SS_s[i, :, :]
                jac[i + 1, :n_s, i, n_s:] = SS_x[i, :, :]
                jac[i + 1, :n_s, i + 1, :n_s] = -np.eye(n_s)
            # Derivatives of the two boundary-condition rows.
            jac[0, :n_s, 0, :n_s] = -np.eye(n_s)
            jac[-1, n_s:, -1, n_s:] = -np.eye(n_x)
            jac[-1, n_s:, -2, n_s:] = +np.eye(n_x)
            nn = jac.shape[0] * jac.shape[1]
            res = res.ravel()
            jac = jac.reshape((nn, nn))
        if jactype == 'sparse':
            from scipy.sparse import csc_matrix, csr_matrix
            jac = csc_matrix(jac)
            # scipy bug? I don't get the same with csr
        return [res, jac]
|
def GetNextWrittenEventSource(self):
    """Retrieves the next event source that was written after open.

    Returns:
      EventSource: event source or None if there are no newly written ones.

    Raises:
      IOError: when the storage writer is closed.
      OSError: when the storage writer is closed.
    """
    if not self._storage_file:
        raise IOError('Unable to read from closed storage writer.')
    source = self._storage_file.GetEventSourceByIndex(
        self._written_event_source_index)
    if source:
        # Only advance the cursor when something was actually retrieved.
        self._written_event_source_index += 1
        return source
    return None
|
def issue(self, issue_instance_id):
    """Select an issue.

    Parameters:
        issue_instance_id: int    id of the issue instance to select

    Note: We are selecting issue instances, even though the command is called
    issue.
    """
    with self.db.make_session() as session:
        selected_issue = (session.query(IssueInstance).filter(IssueInstance.id == issue_instance_id).scalar())
        if selected_issue is None:
            self.warning(f"Issue {issue_instance_id} doesn't exist. " "Type 'issues' for available issues.")
            return
        # Cache the source/sink leaves for this instance so the trace
        # commands below don't need to re-query them.
        self.sources = self._get_leaves_issue_instance(session, issue_instance_id, SharedTextKind.SOURCE)
        self.sinks = self._get_leaves_issue_instance(session, issue_instance_id, SharedTextKind.SINK)
        self.current_issue_instance_id = int(selected_issue.id)
        # -1 marks "no frame selected"; trace index 1 is the entry right
        # after the source.
        self.current_frame_id = -1
        self.current_trace_frame_index = 1
        # first one after the source
        print(f"Set issue to {issue_instance_id}.")
        # Switch runs if the selected issue belongs to a different run.
        if int(selected_issue.run_id) != self.current_run_id:
            self.current_run_id = int(selected_issue.run_id)
            print(f"Set run to {self.current_run_id}.")
        print()
        self._generate_trace_from_issue()
        self.show()
|
def has_credentials_stored():
    """Return the stored 'auth token' string if user credentials exist.

    The credentials file is expected to contain the token on the first
    line (the second line holds the user id, which is not returned).

    :returns: the token string, or False when the credentials file is
        missing or unreadable.
    """
    # Fix: the original used Python 2 ``except Exception, e`` syntax, which
    # is a SyntaxError on Python 3; the bound exception and the ``id``
    # variable (shadowing the builtin) were also unused.
    try:
        with open(credentials_file, 'r') as f:
            token = f.readline().strip()
            f.readline()  # second line: user id; intentionally discarded
            return token
    except Exception:
        # Any failure (file missing, unreadable, ...) means "not stored".
        return False
|
def _set_alarm_sample(self, v, load=False):
    """Setter method for alarm_sample, mapped from YANG variable /rmon/alarm_entry/alarm_sample (alarm-sample-type)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_alarm_sample is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_alarm_sample() directly.
    """
    # Generated pyangbind setter: values carrying their own type converter
    # are unwrapped first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the value in the restricted YANG type; only 'delta' and
        # 'absolute' are legal sample types.
        t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'delta': {'value': 2}, u'absolute': {'value': 1}},), is_leaf=True, yang_name="alarm-sample", rest_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'alt-name': u'type', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-rmon', defining_module='brocade-rmon', yang_type='alarm-sample-type', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured error payload describing the expected type.
        raise ValueError({'error-string': """alarm_sample must be of a type compatible with alarm-sample-type""", 'defined-type': "brocade-rmon:alarm-sample-type", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'delta': {'value': 2}, u'absolute': {'value': 1}},), is_leaf=True, yang_name="alarm-sample", rest_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'alt-name': u'type', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-rmon', defining_module='brocade-rmon', yang_type='alarm-sample-type', is_config=True)""", })
    self.__alarm_sample = t
    # Notify the parent object, if it supports change notification.
    if hasattr(self, '_set'):
        self._set()
|
def run_analyze_dimension_and_radius(data, rmin, rmax, nradii, adjacency_method='brute', adjacency_kwds=None, fit_range=None, savefig=False, plot_name='dimension_plot.png'):
    """Estimate the doubling dimension (approximately equal to the intrinsic
    dimension) by computing a graph of neighborhood radius versus average
    number of neighbors.

    The "radius" refers to the truncation constant where all distances greater
    than a specified radius are taken to be infinite. This is used for example
    in the truncated Gaussian kernel in estimate_radius.py.

    Parameters
    ----------
    data : numpy array,
        Original data set for which we are estimating the bandwidth
    rmin : float,
        smallest radius to consider
    rmax : float,
        largest radius to consider
    nradii : int,
        number of radii between rmax and rmin to consider
    adjacency_method : string,
        megaman adjacency method to use, default 'brute'; see geometry.py
    adjacency_kwds : dict or None,
        keyword arguments for the adjacency method (default: empty dict)
    fit_range : list of ints or None,
        range of radii to consider; default is range(nradii), i.e. all of them
    savefig : bool,
        whether to save the radius vs. neighbors figure
    plot_name : string,
        filename of the figure to be saved as.

    Returns
    -------
    results : dictionary
        contains the radii, average neighbors, min and max number of neighbors
        and number of points with no neighbors.
    dim : float,
        estimated doubling dimension (used as an estimate of the intrinsic
        dimension)
    """
    # Fix: the original signature used a mutable default (adjacency_kwds={}),
    # which is shared across calls; use None as the sentinel instead.
    if adjacency_kwds is None:
        adjacency_kwds = {}
    n, D = data.shape
    # Radii are log-spaced between rmin and rmax.
    radii = 10 ** (np.linspace(np.log10(rmin), np.log10(rmax), nradii))
    dists = compute_largest_radius_distance(data, rmax, adjacency_method, adjacency_kwds)
    results = neighborhood_analysis(dists, radii)
    avg_neighbors = results['avg_neighbors'].flatten()
    radii = results['radii'].flatten()
    if fit_range is None:
        fit_range = range(len(radii))
    dim = find_dimension_plot(avg_neighbors, radii, fit_range, savefig, plot_name)
    return (results, dim)
|
def as_dict(self):
    """returns a dictionary representation of the block and of all
    component options
    """
    # TODO/FIXME: add selected information
    if self.hidden:
        # Hidden blocks expose nothing.
        rdict = {}
    else:
        # NOTE(review): result is unused — presumably leftover from the TODO
        # above; confirm selected() has no required side effects before removing.
        def_selected = self.selected()
        # One entry per component; only Optionable components expose options.
        comps = [{'name': comp.name, 'default': comp.name in self.defaults, 'options': comp.get_ordered_options() if isinstance(comp, Optionable) else None} for comp in self]
        rdict = {'name': self.name, 'required': self.required, 'multiple': self.multiple, 'args': self.in_name, 'returns': self.out_name, 'components': comps}
    return rdict
|
def _derive_distinct_intervals ( self , rows ) :
"""Returns the set of distinct intervals in a row set .
: param list [ dict [ str , T ] ] rows : The rows set .
: rtype : set [ ( int , int ) ]"""
|
ret = set ( )
for row in rows :
self . _add_interval ( ret , ( row [ self . _key_start_date ] , row [ self . _key_end_date ] ) )
return ret
|
def is_valid_matrix(matrix):
    """Determines if a given n x n matrix is valid. A valid matrix is defined
    as a matrix in which every row and every column contains all the integers
    from 1 to n (inclusive).

    Args:
        matrix (list of list of int): The input n x n matrix to be checked.

    Returns:
        bool: True if the matrix is valid, False otherwise.

    Examples:
        >>> is_valid_matrix([[1,2,3], [3,1,2], [2,3,1]])
        True
        >>> is_valid_matrix([[1,1,1], [1,2,3], [1,2,3]])
        False
    """
    # Fix: the original only checked that rows/columns held distinct values,
    # so e.g. [[2,3],[3,2]] passed despite not containing 1..n; it also
    # crashed (IndexError) on an empty matrix.
    if not matrix:
        return False
    n = len(matrix)
    if any(len(row) != n for row in matrix):
        # Not square: cannot satisfy the n x n contract.
        return False
    target = set(range(1, n + 1))
    for row in matrix:
        if set(row) != target:
            return False
    # zip(*matrix) yields the columns.
    for column in zip(*matrix):
        if set(column) != target:
            return False
    return True
|
def findAll(self, strSeq):
    """Same as find but returns a list of all occurences"""
    encoded = self.encode(strSeq)
    # KMP search over the first encoded sequence, accumulating into a
    # fresh result list.
    return self._kmp_find(encoded[0], self, [])
|
def AddAsMessage(self, rdfvalue_in, source, mutation_pool=None):
    """Helper method to add rdfvalues as GrrMessages for testing."""
    message = rdf_flows.GrrMessage(payload=rdfvalue_in, source=source)
    self.Add(message, mutation_pool=mutation_pool)
|
def get_changelog_date_packager(self):
    """Returns part of the changelog entry, containing date and packager.

    :returns: unicode string ``"<date> <packager>"`` where the date is in
        RPM changelog format (e.g. ``Mon Jan 01 2024``).
    """
    try:
        # rpmdev-packager prints "Name <email>"; communicate() returns bytes.
        packager = subprocess.Popen('rpmdev-packager', stdout=subprocess.PIPE).communicate()[0].strip()
    except OSError:
        # Hi John Doe, you should install rpmdevtools.
        # Fix: keep the fallback as bytes so the .decode() below works on
        # Python 3 (the original assigned a str, making .decode() fail);
        # also use warning() instead of the deprecated warn().
        packager = b"John Doe <john@doe.com>"
        logger.warning("Package rpmdevtools is missing, using default "
                       "name: {0}.".format(packager.decode("ascii")))
    with utils.c_time_locale():
        date_str = time.strftime('%a %b %d %Y', time.gmtime())
    encoding = locale.getpreferredencoding()
    return u'{0} {1}'.format(date_str, packager.decode(encoding))
|
def format_body(self, description, sys_info=None, traceback=None):
    """Format the report body in plain text (a row of dashes is added
    under each section title).

    :param description: Description of the issue, written by the user.
    :param sys_info: Optional system information string.
    :param traceback: Optional traceback; only the last NB_LINES_MAX
        lines are kept.
    """
    delim = '-' * 40
    sections = [('Description', description)]
    if traceback:
        trimmed = '\n'.join(traceback.splitlines()[-NB_LINES_MAX:])
        sections.append(('Traceback', trimmed))
    if sys_info:
        sections.append(('System information', sys_info))
    return ''.join(
        BODY_ITEM_TEMPLATE % {'name': name, 'value': value, 'delim': delim}
        for name, value in sections)
|
def reverse(self):
    """Reverse the order of all items in the dictionary.

    Example:
      omd = omdict([(1,1), (1,11), (1,111), (2,2), (3,3)])
      omd.reverse()
      omd.allitems() == [(3,3), (2,2), (1,111), (1,11), (1,1)]

    Returns: <self>.
    """
    # Reverse each key's value list in place, then the flat item list.
    for values in six.itervalues(self._map):
        values.reverse()
    self._items.reverse()
    return self
|
def send(self, *sender, **kwargs):
    """Emit this signal on behalf of `sender`, passing on kwargs.

    This is an extension of `Signal.send` that changes one thing:
    Exceptions raised in calling the receiver are logged but do not fail.
    """
    if not sender:
        emitter = None
    elif len(sender) > 1:
        raise TypeError('send() accepts only one positional argument, '
                        '%s given' % len(sender))
    else:
        emitter = sender[0]
    if not self.receivers:
        return []
    results = []
    for receiver in self.receivers_for(emitter):
        try:
            results.append((receiver, receiver(emitter, **kwargs)))
        except Exception:
            # Swallow receiver failures so one bad receiver cannot break
            # dispatch to the others.
            logger.exception('Error while dispatching signal "{}" '
                             'to receiver'.format(self.name))
    return results
|
def tob(data, enc='utf8'):
    """Convert anything to bytes"""
    if isinstance(data, six.text_type):
        # Text is encoded with the requested codec; everything else goes
        # through the bytes() constructor.
        return data.encode(enc)
    return bytes(data)
|
def extract_table(self, source, destination_uris, job_id=None, job_id_prefix=None, location=None, project=None, job_config=None, retry=DEFAULT_RETRY,):
    """Start a job to extract a table into Cloud Storage files.

    See
    https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.extract

    Arguments:
        source (Union[:class:`google.cloud.bigquery.table.Table`, :class:`google.cloud.bigquery.table.TableReference`, src,]):
            Table to be extracted.
        destination_uris (Union[str, Sequence[str]]):
            URIs of Cloud Storage file(s) into which table data is to be
            extracted; in format
            ``gs://<bucket_name>/<object_name_or_glob>``.

    Keyword Arguments:
        job_id (str): (Optional) The ID of the job.
        job_id_prefix (str):
            (Optional) the user-provided prefix for a randomly generated
            job ID. This parameter will be ignored if a ``job_id`` is
            also given.
        location (str):
            Location where to run the job. Must match the location of the
            source table. Defaults to the client's location.
        project (str):
            Project ID of the project of where to run the job. Defaults
            to the client's project.
        job_config (google.cloud.bigquery.job.ExtractJobConfig):
            (Optional) Extra configuration options for the job.
        retry (google.api_core.retry.Retry):
            (Optional) How to retry the RPC.

    Returns:
        google.cloud.bigquery.job.ExtractJob: A new extract job instance.
    """
    job_id = _make_job_id(job_id, job_id_prefix)
    # Fall back to the client's defaults when not supplied.
    if project is None:
        project = self.project
    if location is None:
        location = self.location
    job_ref = job._JobReference(job_id, project=project, location=location)
    # Accept strings/Table/TableReference for the source.
    source = _table_arg_to_table_ref(source, default_project=self.project)
    # Normalize a single URI into a one-element list.
    if isinstance(destination_uris, six.string_types):
        destination_uris = [destination_uris]
    extract_job = job.ExtractJob(job_ref, source, destination_uris, client=self, job_config=job_config)
    # Issue the API request to start the job before returning it.
    extract_job._begin(retry=retry)
    return extract_job
|
def make_vertical_bar(percentage, width=1):
    """Draws a vertical bar made of unicode characters.

    :param percentage: A value between 0 and 100
    :param width: How many characters wide the bar should be.
    :returns: Bar as a String
    """
    glyphs = ' _▁▂▃▄▅▆▇█'
    # Map 0-100 onto glyph indices 0-9, clamping out-of-range values.
    level = int(percentage // 10)
    if level < 0:
        level = 0
    elif level >= len(glyphs):
        level = len(glyphs) - 1
    return glyphs[level] * width
|
def WriteFileFooter(self):
    """Writes file footer (finishes the file).

    Pads the current tar entry with NUL bytes up to the next tar block
    boundary, advances the tar offset, resets per-file state and returns
    the bytes buffered so far.

    Raises:
      IOError: if fewer/more bytes were written than the declared st_size.
    """
    if self.cur_file_size != self.cur_info.size:
        raise IOError("Incorrect file size: st_size=%d, but written %d bytes." % (self.cur_info.size, self.cur_file_size))
    # TODO(user): pytype: BLOCKSIZE/NUL constants are not visible to type
    # checker.
    # pytype: disable=module-attr
    blocks, remainder = divmod(self.cur_file_size, tarfile.BLOCKSIZE)
    if remainder > 0:
        # Pad the final partial block with NULs so the entry ends on a
        # tar block boundary.
        self._tar_fd.fileobj.write(tarfile.NUL * (tarfile.BLOCKSIZE - remainder))
        blocks += 1
    self._tar_fd.offset += blocks * tarfile.BLOCKSIZE
    # pytype: enable=module-attr
    self._ResetState()
    return self._stream.GetValueAndReset()
|
def send_meta_data(socket, conf, name):
    '''Sends the config via ZeroMQ to a specified socket. Is called at the beginning of a run and when the config changes. Conf can be any config dictionary.'''
    payload = {'name': name, 'conf': conf}
    try:
        socket.send_json(payload, flags=zmq.NOBLOCK)
    except zmq.Again:
        # Receiver not ready; meta data delivery is best-effort, drop it.
        pass
|
def remover(self, id_divisiondc):
    """Remove Division Dc from by the identifier.

    :param id_divisiondc: Identifier of the Division Dc. Integer value and greater than zero.

    :return: None

    :raise InvalidParameterError: The identifier of Division Dc is null and invalid.
    :raise DivisaoDcNaoExisteError: Division Dc not registered.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    if not is_valid_int_param(id_divisiondc):
        raise InvalidParameterError(u'The identifier of Division Dc is invalid or was not informed.')
    uri = 'divisiondc/{0}/'.format(id_divisiondc)
    code, xml = self.submit(None, 'DELETE', uri)
    return self.response(code, xml)
|
def create(self, **kwargs):
    """Creates a new statement matching the keyword arguments specified.
    Returns the created statement.
    """
    Statement = self.get_model('statement')
    Tag = self.get_model('tag')
    tag_names = kwargs.pop('tags', [])
    # Derive the search fields from the text when not supplied explicitly.
    if 'search_text' not in kwargs:
        kwargs['search_text'] = self.tagger.get_bigram_pair_string(kwargs['text'])
    if 'search_in_response_to' not in kwargs and kwargs.get('in_response_to'):
        kwargs['search_in_response_to'] = self.tagger.get_bigram_pair_string(kwargs['in_response_to'])
    statement = Statement(**kwargs)
    statement.save()
    # Resolve (or create) the tag rows, then attach them all at once.
    tag_objects = [Tag.objects.get_or_create(name=name)[0] for name in tag_names]
    statement.tags.add(*tag_objects)
    return statement
|
def read(self, size):
    """read size bytes and return them

    Serves the request from the internal block buffer, loading further
    blocks via _load_block() as needed. May return fewer than `size`
    bytes at end of stream.
    """
    done = 0
    # number of bytes that have been read so far
    v = ''
    while True:
        if size - done < len(self._buffer['data']) - self._buffer_pos:
            # Current buffer holds enough remaining bytes: slice out the
            # rest of the request and advance the read position.
            v += self._buffer['data'][self._buffer_pos:self._buffer_pos + (size - done)]
            self._buffer_pos += (size - done)
            # self.pointer += size
            return v
        else:
            # we need more buffer: take what is left, then load the next block
            vpart = self._buffer['data'][self._buffer_pos:]
            self._buffer = self._load_block()
            v += vpart
            self._buffer_pos = 0
            if len(self._buffer['data']) == 0:
                # End of stream: return a (possibly short) result.
                return v
            done += len(vpart)
|
def get_upper_triangle(correlation_matrix):
    '''Extract upper triangle from a square matrix. Negative values are
    set to 0.

    Args:
        correlation_matrix (pandas df): Correlations between all replicates

    Returns:
        upper_tri_df (pandas df): Upper triangle extracted from
            correlation_matrix; rid is the row index, cid is the column index,
            corr is the extracted correlation value
    '''
    # Fix: np.bool (deprecated since NumPy 1.20, removed in 1.24) replaced
    # with the builtin bool. k=1 excludes the diagonal from the mask.
    mask = np.triu(np.ones(correlation_matrix.shape), k=1).astype(bool)
    upper_triangle = correlation_matrix.where(mask)
    # convert matrix into long form description
    upper_tri_df = upper_triangle.stack().reset_index(level=1)
    upper_tri_df.columns = ['rid', 'corr']
    # Index at this point is cid, it now becomes a column
    upper_tri_df.reset_index(level=0, inplace=True)
    # Get rid of negative values
    upper_tri_df['corr'] = upper_tri_df['corr'].clip(lower=0)
    return upper_tri_df.round(rounding_precision)
|
def schedule_from_proto_dicts(device: 'xmon_device.XmonDevice', ops: Iterable[Dict],) -> Schedule:
    """Convert proto dictionaries into a Schedule for the given device."""
    scheduled_ops = []
    # Timestamps are cumulative: each op's delay is added to the previous
    # op's absolute time.
    current_picos = 0
    for proto_op in ops:
        current_picos += proto_op.get('incremental_delay_picoseconds', 0)
        operation = xmon_op_from_proto_dict(proto_op)
        scheduled_ops.append(ScheduledOperation.op_at_on(
            operation=operation,
            time=Timestamp(picos=current_picos),
            device=device,))
    return Schedule(device, scheduled_ops)
|
def menu_clean(menu_config):
    """Make sure that only the menu item with the largest weight is active.
    If a child of a menu item is active, the parent should be active too.

    :param menu_config: mapping of item name -> item dict
    :return: the (mutated) menu_config
    """
    max_weight = -1
    for entry in menu_config.values():
        submenu = entry["submenu"]
        if submenu:
            for child in submenu.values():
                if child["active"]:
                    # Parent inherits the weight of the active child.
                    entry["active"] = True
                    entry["active_weight"] = child["active_weight"]
        if entry["active"]:
            max_weight = max(max_weight, entry["active_weight"])
    if max_weight > 0:
        # One of the items is active: deactivate items with lesser weight.
        for entry in menu_config.values():
            if entry["active"] and entry["active_weight"] < max_weight:
                entry["active"] = False
    return menu_config
|
def get_times_from_utterance(utterance: str, char_offset_to_token_index: Dict[int, int], indices_of_approximate_words: Set[int]) -> Dict[str, List[int]]:
    """Given an utterance, we get the numbers that correspond to times and convert them to
    values that may appear in the query. For example: convert ``7pm`` to ``1900``.
    """
    # Each pattern is paired with the function that maps a match to its
    # query value(s); all partial results are merged into one dict.
    matchers = [
        (r'\d+pm', pm_map_match_to_query_value),
        (r'\d+am', am_map_match_to_query_value),
        (r"\d+ o'clock", lambda match: digit_to_query_time(match.rstrip(" o'clock"))),
        (r"\d+ hours", lambda match: [int(match.rstrip(" hours"))]),
    ]
    times_linking_dict: Dict[str, List[int]] = defaultdict(list)
    for pattern, value_fn in matchers:
        partial = _time_regex_match(pattern, utterance, char_offset_to_token_index, value_fn, indices_of_approximate_words)
        for key, values in partial.items():
            times_linking_dict[key].extend(values)
    return times_linking_dict
|
def simplify_expression(txt):
    """Remove all unecessary whitespace and some very usual space"""
    # Unrolled version of the original nested re.sub chain, applied in the
    # same order: strip space after/before non-word chars, collapse the
    # rest to single spaces.
    stripped = txt.strip()
    no_space_after = re.sub(r'(?<=\W)\s', '', stripped)
    no_space_before = re.sub(r'\s(?=\W)', '', no_space_after)
    minimal = re.sub(r'\s', ' ', no_space_before)
    # add space before some "(" and after some ")"
    with_open = re.sub(r'(,|\b(?:{}))\('.format('|'.join(RESERVED_WORDS)), '\\1 (', minimal)
    return re.sub(r'\)(?=\w)', ') ', with_open)
|
def _style_to_basic_html_attributes(self, element, style_content, force=False):
    """given an element and styles like
    'background-color:red; font-family:Arial' turn some of that into HTML
    attributes. like 'bgcolor', etc.

    Note, the style_content can contain pseudoclasses like:
    '{color:red; border:1px solid green} :visited {border:1px solid green}'
    """
    # If pseudoclass blocks are present, keep only the first "{...}" body.
    if style_content.count("}") and style_content.count("{") == style_content.count("}"):
        style_content = style_content.split("}")[0][1:]
    attributes = OrderedDict()
    # Parse "key: value" pairs; malformed entries (wrong number of colons)
    # are silently skipped.
    for key, value in [x.split(":") for x in style_content.split(";") if len(x.split(":")) == 2]:
        key = key.strip()
        if key == "text-align":
            attributes["align"] = value.strip()
        elif key == "vertical-align":
            attributes["valign"] = value.strip()
        elif key == "background-color" and "transparent" not in value.lower():
            # Only add the 'bgcolor' attribute if the value does not
            # contain the word "transparent"; before we add it possibly
            # correct the 3-digit color code to its 6-digit equivalent
            # ("abc" to "aabbcc") so IBM Notes copes.
            attributes["bgcolor"] = self.six_color(value.strip())
        elif key == "width" or key == "height":
            value = value.strip()
            # Strip a trailing "px" unit; bare numbers are what the HTML
            # attributes expect.
            if value.endswith("px"):
                value = value[:-2]
            attributes[key] = value
    for key, value in attributes.items():
        if (key in element.attrib and not force or key in self.disable_basic_attributes):
            # already set, don't dare to overwrite
            continue
        element.attrib[key] = value
|
def update_data():
    """Update data sent by background process to global allData"""
    global allData
    # Drain the queue; only the most recent snapshot is kept.
    while not q.empty():
        allData = q.get()
    # Attach human-readable names for the known tags.
    for tag_key, tag_label in tags.items():
        if tag_key in allData:
            allData[tag_key]['name'] = tag_label
|
def sort_recursive(data):
    """Recursively sorts all elements in a dictionary

    Args:
        data (dict): The dictionary to sort

    Returns:
        sorted_dict (OrderedDict): The sorted data dict
    """
    # Recurse into plain-dict values, then sort by (type rank, key).
    converted = {}
    for key, value in data.items():
        converted[key] = sort_recursive(value) if type(value) is dict else value
    return OrderedDict(sorted(converted.items(), key=lambda item: (compare_type(type(item[1])), item[0])))
|
def download(cls, filename, input_dir, dl_dir=None):
    """Download the resource from the storage."""
    file_info = cls.parse_remote(filename)
    if not dl_dir:
        # Default destination mirrors the remote container/blob layout.
        dl_dir = os.path.join(input_dir, file_info.container, os.path.dirname(file_info.blob))
    utils.safe_makedir(dl_dir)
    out_file = os.path.join(dl_dir, os.path.basename(file_info.blob))
    if utils.file_exists(out_file):
        # Already downloaded; reuse the local copy.
        return out_file
    with file_transaction({}, out_file) as tx_out_file:
        blob_service = cls.connect(filename)
        blob_service.get_blob_to_path(
            container_name=file_info.container,
            blob_name=file_info.blob,
            file_path=tx_out_file)
    return out_file
|
def set_stop_handler(self):
    """Initializes functions that are invoked when the user or OS wants to kill this process.

    :return:
    """
    # Route all termination-style signals to the same graceful handler.
    for signum in (signal.SIGTERM, signal.SIGABRT, signal.SIGINT):
        signal.signal(signum, self.graceful_stop)
|
async def field(self, elem=None, elem_type=None, params=None):
    """Archive field

    Dispatches (de)serialization of a single field to the handler matching
    its declared type. Note: the issubclass chain below is order-sensitive
    (e.g. UVarintType must be tested before IntType).

    :param elem: the field value (or container when reading)
    :param elem_type: explicit type; defaults to elem's class
    :param params: extra parameters forwarded to the type handler
    :return: the archived value when writing, otherwise the element with
        the decoded value set
    :raises TypeError: if elem_type matches no known archive type
    """
    elem_type = elem_type if elem_type else elem.__class__
    fvalue = None
    src = elem
    if issubclass(elem_type, x.UVarintType):
        fvalue = await self.uvarint(x.get_elem(src))
    elif issubclass(elem_type, x.IntType):
        fvalue = await self.uint(elem=x.get_elem(src), elem_type=elem_type, params=params)
    elif issubclass(elem_type, x.BlobType):
        fvalue = await self.blob(elem=x.get_elem(src), elem_type=elem_type, params=params)
    elif issubclass(elem_type, x.UnicodeType):
        fvalue = await self.unicode_type(x.get_elem(src))
    elif issubclass(elem_type, x.VariantType):
        fvalue = await self.variant(elem=x.get_elem(src), elem_type=elem_type, params=params)
    elif issubclass(elem_type, x.ContainerType):
        # container ~ simple list
        fvalue = await self.container(container=x.get_elem(src), container_type=elem_type, params=params)
    elif issubclass(elem_type, x.TupleType):
        # tuple ~ simple list
        fvalue = await self.tuple(elem=x.get_elem(src), elem_type=elem_type, params=params)
    elif issubclass(elem_type, x.MessageType):
        fvalue = await self.message(x.get_elem(src), msg_type=elem_type)
    else:
        raise TypeError
    # When writing, return the raw value; when reading, store it on elem.
    return fvalue if self.writing else x.set_elem(elem, fvalue)
|
def cygpath(filename):
    """Convert a cygwin path into a windows style path"""
    if sys.platform != 'cygwin':
        # Nothing to convert outside of cygwin.
        return filename
    proc = Popen(['cygpath', '-am', filename], stdout=PIPE)
    output, _ = proc.communicate()
    return output.strip()
|
def _update_state_from_response ( self , response_json ) :
""": param response _ json : the json obj returned from query
: return :"""
|
_response_json = response_json . get ( 'data' )
if _response_json is not None :
self . json_state = _response_json
return True
return False
|
def edges_to_path(edges):
    """Connect edges and return a path."""
    if not edges:
        return None
    # Build a graph from the edge list; a topological ordering of its
    # nodes is the connected path.
    graph = edges_to_graph(edges)
    return nx.topological_sort(graph)
|
def ex(mt, x):
    """ex: Returns the curtate expectation of life. Life expectancy

    Sums the survivors l_{x+1}.. (excluding the final table entry) and
    divides by l_x, with the standard 0.5 continuity correction.

    :param mt: mortality table exposing an ``lx`` sequence.
    :param x: integer age.
    :returns: life expectancy at age x, or 0 when it cannot be computed
        (l_x is zero or x is out of the table's range).
    """
    # Fix: manual accumulation loop replaced with sum(); the bare ``except``
    # narrowed to the two failures that can actually occur here, so
    # unrelated errors are no longer silently swallowed.
    total = sum(mt.lx[x + 1:-1])
    try:
        return total / mt.lx[x] + 0.5
    except (ZeroDivisionError, IndexError):
        return 0
|
def trace_symlink_target(link):
    """Given a file that is known to be a symlink, trace it to its ultimate
    target.

    Raises TargetNotPresent when the target cannot be determined.
    Raises ValueError when the specified link is not a symlink.
    """
    if not is_symlink(link):
        raise ValueError("link must point to a symlink on the system")
    # Follow links until a non-link is reached; each hop is resolved
    # relative to the directory of the link that referenced it.
    while is_symlink(link):
        base_dir = os.path.dirname(link)
        link = resolve_path(readlink(link), base_dir)
    return link
|
def afx_adafactor():
    """Adafactor with recommended learning rate schedule."""
    # Start from the Adam baseline and swap in the Adafactor settings.
    hp = afx_adam()
    hp.optimizer = "Adafactor"
    hp.learning_rate_schedule = "rsqrt_decay"
    hp.learning_rate_warmup_steps = 10000
    return hp
|
def record_entering(self, time, code, frame_key, parent_stats):
    """Record that a function call was entered at *time*."""
    child_stats = parent_stats.ensure_child(code, RecordingStatistics)
    child_stats.own_hits += 1
    # Remember the entry time so record-leaving can compute the duration.
    self._times_entered[(code, frame_key)] = time
|
def certs(self):
    """List of the certificates contained in the structure"""
    stack = libcrypto.CMS_get1_certs(self.ptr)
    if stack is None:
        raise CMSError("getting certs")
    # disposable=True: the wrapper owns the stack and frees it.
    return StackOfX509(ptr=stack, disposable=True)
|
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
    """Extracts events from a Windows Registry key.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
    """
    # Run both value parsers over the key, logon applications first.
    for parse in (self._ParseLogonApplications, self._ParseRegisteredDLLs):
        parse(parser_mediator, registry_key)
|
def _find_field_generator_templates(self):
    """Collect all tohu generators defined on this custom generator.

    Returns a dict of the form {name: field_generator} containing every
    TohuBaseGenerator found in the class namespace and in the instance
    namespace (instance entries override class entries of the same name).
    """
    templates = {}
    # Scan the class dict first, then the instance dict, so instance-level
    # generators shadow class-level ones with the same name.
    for namespace in (self.__class__.__dict__, self.__dict__):
        for name, gen in namespace.items():
            if isinstance(gen, TohuBaseGenerator):
                templates[name] = gen.set_tohu_name(f'{name} (TPL)')
    return templates
|
def workflow_script_import(self):
    """Create AnalysisRequest objects from a valid ARImport.

    Iterates the 'SampleData' grid rows, resolves each row's profile and
    analysis references to UIDs, merges in schema-level fields (Batch,
    client reference/order number, Contact), creates one analysis request
    per row, and updates a progress bar as it goes.
    """
    bsc = getToolByName(self, 'bika_setup_catalog')
    client = self.aq_parent
    title = _('Submitting Sample Import')
    description = _('Creating and initialising objects')
    bar = ProgressBar(self, self.REQUEST, title, description)
    notify(InitialiseProgressBar(bar))
    profiles = [x.getObject() for x in bsc(portal_type='AnalysisProfile')]
    gridrows = self.schema['SampleData'].get(self)
    row_cnt = 0
    for therow in gridrows:
        # Work on a copy so the stored grid data is not mutated.
        row = deepcopy(therow)
        row_cnt += 1
        # Profiles are titles, profile keys, or UIDS: convert them to UIDs.
        # NOTE(review): the loop variable below shadows the outer `title`
        # used for the progress bar; harmless since `title` is not read
        # again, but confirm this is intended.
        newprofiles = []
        for title in row['Profiles']:
            objects = [x for x in profiles if title in (x.getProfileKey(), x.UID(), x.Title())]
            for obj in objects:
                newprofiles.append(obj.UID())
        row['Profiles'] = newprofiles
        # Same for analyses
        newanalyses = set(self.get_row_services(row) + self.get_row_profile_services(row))
        # get batch
        batch = self.schema['Batch'].get(self)
        if batch:
            row['Batch'] = batch.UID()
        # Add AR fields from schema into this row's data
        row['ClientReference'] = self.getClientReference()
        row['ClientOrderNumber'] = self.getClientOrderNumber()
        contact_uid = self.getContact().UID() if self.getContact() else None
        row['Contact'] = contact_uid
        # Creating analysis request from gathered data
        ar = create_analysisrequest(client, self.REQUEST, row, analyses=list(newanalyses), )
        # progress marker update
        progress_index = float(row_cnt) / len(gridrows) * 100
        progress = ProgressState(self.REQUEST, progress_index)
        notify(UpdateProgressEvent(progress))
    # document has been written to, and redirect() fails here
    self.REQUEST.response.write('<script>document.location.href="%s"</script>' % (self.absolute_url()))
|
def get_date_range(year=None, month=None, day=None):
    """Return a (start, end) range to query for a specific year, month or day.

    :param year: year to query; when None, None is returned.
    :param month: optional month within the year.
    :param day: optional day within the month.
    :return: tuple of timezone-aware datetimes (start, end) spanning the
        whole period inclusively (end sits at the last microsecond), or
        None when no year is given.
    """
    if year is None:
        return None
    start = datetime(year, month or 1, day or 1, 0, 0, 0, tzinfo=utc)
    if month is None:  # whole year
        # BUG FIX: this branch previously ended at microsecond 999 instead
        # of 999999, silently excluding almost all of the year's final
        # second; compute the end the same way as the other branches.
        end = datetime(year + 1, 1, 1, tzinfo=utc) - timedelta(microseconds=1)
    elif day is None:  # whole month
        end = start + timedelta(days=monthrange(year, month)[1], microseconds=-1)
    else:  # exact day
        end = start + timedelta(days=1, microseconds=-1)
    return (start, end)
|
def _parse_ethtool_opts(opts, iface):
    '''Filter the given options and output valid settings for ETHTOOL_OPTS.

    If an option has a value that is not expected, this function will log
    what the interface and setting were and what value was expected.
    '''
    config = {}
    if 'autoneg' in opts:
        if opts['autoneg'] in _CONFIG_TRUE:
            config.update({'autoneg': 'on'})
        elif opts['autoneg'] in _CONFIG_FALSE:
            config.update({'autoneg': 'off'})
        else:
            _raise_error_iface(iface, 'autoneg', _CONFIG_TRUE + _CONFIG_FALSE)
    if 'duplex' in opts:
        valid = ['full', 'half']
        if opts['duplex'] in valid:
            config.update({'duplex': opts['duplex']})
        else:
            _raise_error_iface(iface, 'duplex', valid)
    if 'speed' in opts:
        valid = ['10', '100', '1000', '10000']
        if six.text_type(opts['speed']) in valid:
            config.update({'speed': opts['speed']})
        else:
            # BUG FIX: previously passed the invalid value (opts['speed'])
            # where the setting name is expected, unlike every other branch,
            # producing a confusing error message.
            _raise_error_iface(iface, 'speed', valid)
    valid = _CONFIG_TRUE + _CONFIG_FALSE
    # Boolean offload toggles all share the same on/off validation.
    for option in ('rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro'):
        if option in opts:
            if opts[option] in _CONFIG_TRUE:
                config.update({option: 'on'})
            elif opts[option] in _CONFIG_FALSE:
                config.update({option: 'off'})
            else:
                _raise_error_iface(iface, option, valid)
    return config
|
def _set_fcoe_get_interface(self, v, load=False):
    """Setter method for fcoe_get_interface, mapped from YANG variable /brocade_fcoe_ext_rpc/fcoe_get_interface (rpc)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_fcoe_get_interface is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_fcoe_get_interface() directly.

    YANG Description: This function is to get the operational state of an FCoE
    interface(s).
    """
    # Values generated from a YANG union expose their underlying type via
    # _utype; normalize to that type before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value in the generated class so YANG constraints are
        # enforced; incompatible values raise TypeError/ValueError here.
        t = YANGDynClass(v, base=fcoe_get_interface.fcoe_get_interface, is_leaf=True, yang_name="fcoe-get-interface", rest_name="fcoe-get-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'fcoe-show-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='rpc', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """fcoe_get_interface must be of a type compatible with rpc""", 'defined-type': "rpc", 'generated-type': """YANGDynClass(base=fcoe_get_interface.fcoe_get_interface, is_leaf=True, yang_name="fcoe-get-interface", rest_name="fcoe-get-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'fcoe-show-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='rpc', is_config=True)""", })
    self.__fcoe_get_interface = t
    # Notify the parent object (if any) that a child value changed.
    if hasattr(self, '_set'):
        self._set()
|
def get_threshold(self):
    """Return a mapping of threshold name to its current [low, high] values.

    Reads the current widget value of each threshold pair stored in
    ``self.threshold_classes``.

    :return: dict mapping each threshold key to a two-element list of the
        pair's current values.
    """
    # Idiomatic dict comprehension replaces the manual dict build; the
    # redundant list() wrapper around .items() is also dropped.
    return {key: [pair[0].value(), pair[1].value()]
            for key, pair in self.threshold_classes.items()}
|
def loadInputs(self, filename, cols=None, everyNrows=1, delim=' ', checkEven=1):
    """Load inputs from file. Patterning is lost.

    Thin wrapper that delegates to ``loadInputsFromFile`` with the same
    arguments unchanged.
    """
    self.loadInputsFromFile(filename, cols, everyNrows, delim, checkEven)
|
def get(self):
    """Pop and return the highest priority Processing Block from the queue."""
    with self._mutex:
        entry = self._queue.pop()
        block_id = entry[2]
        # Keep the id->block map in sync with the queue.
        del self._block_map[block_id]
        return block_id
|
def _set_affected_target_count_in_runtracker ( self ) :
"""Sets the realized target count in the run tracker ' s daemon stats object ."""
|
target_count = len ( self . build_graph )
self . run_tracker . pantsd_stats . set_affected_targets_size ( target_count )
return target_count
|
def fetchone(self, query, *args):
    """Return the first result row of the given query.

    :param query: The query to be executed as a `str`.
    :param args: Parameters that will be substituted for placeholders in
        the query.
    :return: The retrieved row with each field being one element in a
        `tuple`.
    """
    cur = self.connection.cursor()
    try:
        cur.execute(query, args)
        row = cur.fetchone()
    finally:
        # Always release the cursor, even when execute/fetch raises.
        cur.close()
    return row
|
def Match(self, registry_key):
    """Determine whether a Windows Registry key matches the filter.

    Args:
      registry_key (dfwinreg.WinRegistryKey): Windows Registry key.

    Returns:
      bool: True if every expected value name is present on the key.
    """
    names_in_key = frozenset(value.name for value in registry_key.GetValues())
    return self._value_names.issubset(names_in_key)
|
def get_string(self, betas: List[float], gammas: List[float], samples: int = 100):
    """Compute the most probable bitstring for the given QAOA angles.

    The method assumes you have passed init_betas and init_gammas with your
    pre-computed angles or you have run the VQE loop to determine the
    angles. If you have not done this you will be returning the output for
    a random set of angles.

    :param betas: List of beta angles.
    :param gammas: List of gamma angles.
    :param samples: (Optional) number of samples to get back from the
        QuantumComputer; must be a positive integer.
    :returns: tuple of (most probable bitstring, Counter of all sampled
        bitstrings and their frequencies).
    :raises ValueError: when samples is not a positive integer.
    """
    # BUG FIX: the original used `samples <= 0 and not isinstance(samples,
    # int)`, which let non-positive integers (e.g. -3) through; either
    # condition alone must reject the input.
    if samples <= 0 or not isinstance(samples, int):
        raise ValueError("samples variable must be positive integer")
    param_prog = self.get_parameterized_program()
    stacked_params = np.hstack((betas, gammas))
    sampling_prog = Program()
    ro = sampling_prog.declare('ro', 'BIT', len(self.qubits))
    sampling_prog += param_prog(stacked_params)
    sampling_prog += [MEASURE(qubit, r) for qubit, r in zip(self.qubits, ro)]
    sampling_prog.wrap_in_numshots_loop(samples)
    executable = self.qc.compile(sampling_prog)
    bitstring_samples = self.qc.run(executable)
    bitstring_tuples = list(map(tuple, bitstring_samples))
    freq = Counter(bitstring_tuples)
    most_frequent_bit_string = max(freq, key=lambda x: freq[x])
    return most_frequent_bit_string, freq
|
def isValid(self):
    """Return whether the current Reference Sample is valid.

    A sample is valid when it has neither expired nor been disposed of.
    """
    now = DateTime()
    # TODO: Do we really need ExpiryDate + DateExpired? Any difference?
    cutoffs = (self.getExpiryDate(), self.getDateExpired(), self.getDateDisposed())
    for cutoff in cutoffs:
        if cutoff and now > cutoff:
            return False
    return True
|
def knn_impute_reference(X, missing_mask, k, verbose=False, print_interval=100):
    """Reference (unoptimized) implementation of kNN imputation logic."""
    n_rows, _ = X.shape
    X_result, D, effective_infinity = knn_initialize(X, missing_mask, verbose=verbose)
    for row_idx in range(n_rows):
        for col_idx in np.where(missing_mask[row_idx, :])[0]:
            dist = D[row_idx, :].copy()
            # Rows that are themselves missing this column cannot donate a
            # value, so push them beyond the usable distance range.
            dist[missing_mask[:, col_idx]] = effective_infinity
            order = np.argsort(dist)
            ordered_dist = dist[order]
            # Drop any "infinite" neighbors, then keep the k nearest.
            usable = ordered_dist < effective_infinity
            ordered_dist = ordered_dist[usable][:k]
            order = order[usable][:k]
            weights = 1.0 / ordered_dist
            total_weight = weights.sum()
            if total_weight > 0:
                donor_values = X[:, col_idx][order]
                X_result[row_idx, col_idx] = np.dot(donor_values, weights) / total_weight
    return X_result
|
def get_default_jvm_opts(tmp_dir=None, parallel_gc=False):
    """Retrieve default JVM tuning options.

    Avoids issues with multiple spun up Java processes running into out of
    memory errors. Parallel GC can use a lot of cores on big machines and
    primarily helps reduce task latency and responsiveness, which are not
    needed for batch jobs -- but serial GC causes issues with Spark local
    runs, so parallel is used for those.
    https://github.com/bcbio/bcbio-nextgen/issues/532#issuecomment-50989027
    https://github.com/broadinstitute/gatk/issues/3605#issuecomment-332370070
    """
    opts = [] if parallel_gc else ["-XX:+UseSerialGC"]
    if tmp_dir:
        opts += ["-Djava.io.tmpdir=%s" % tmp_dir]
    return opts
|
def _tabulate ( rows , headers , spacing = 5 ) :
"""Prepare simple table with spacing based on content"""
|
if len ( rows ) == 0 :
return "None\n"
assert len ( rows [ 0 ] ) == len ( headers )
count = len ( rows [ 0 ] )
widths = [ 0 for _ in range ( count ) ]
rows = [ headers ] + rows
for row in rows :
for index , field in enumerate ( row ) :
if len ( str ( field ) ) > widths [ index ] :
widths [ index ] = len ( str ( field ) )
output = ""
for row in rows :
for index , field in enumerate ( row ) :
field = str ( field )
output += field + ( widths [ index ] - len ( field ) + spacing ) * " "
output += "\n"
return output
|
def generate_wakeword_pieces(self, volume):
    """Yield an endless stream of layered audio chunks for the wakeword stream."""
    while True:
        # Flip a fair coin between a positive (wakeword) and negative sample.
        is_wakeword = random() > 0.5
        files_it = self.pos_files_it if is_wakeword else self.neg_files_it
        audio = self.normalize_volume_to(load_audio(next(files_it)), volume)
        yield self.layer_with(audio, 1 if is_wakeword else 0)
        # Follow each sample with 0.5-2.5 seconds of silence labeled 0.
        silence_len = int(pr.sample_rate * (0.5 + 2.0 * random()))
        yield self.layer_with(np.zeros(silence_len), 0)
|
def _init_or_update_registered_service_if_needed(self):
    '''Similar to _init_or_update_service_if_needed, but the service
    registration is taken from the registry, so only registered services
    are updated.'''
    if self.is_service_initialized():
        old_reg = self._read_service_info(self.args.org_id, self.args.service_id)
        # "metadataURI" is present in old_reg only for services that were
        # initialized from the registry (not from metadata); do nothing
        # for services initialized from metadata.
        if "metadataURI" not in old_reg:
            return
        service_registration = self._get_service_registration()
        # Nothing to do when the metadataURI has not changed.
        if not self.is_metadataURI_has_changed(service_registration):
            return
    else:
        service_registration = self._get_service_registration()
    service_metadata = self._get_service_metadata_from_registry()
    self._init_or_update_service_if_needed(service_metadata, service_registration)
|
def add_quota(self, quota):
    """Register an internal tracking reference for the given quota."""
    usage = self.usages[quota.name]
    if quota.limit in (None, -1, float('inf')):
        # "Unlimited" quota: both the cap and the headroom are infinite.
        usage['quota'] = float("inf")
        usage['available'] = float("inf")
    else:
        usage['quota'] = int(quota.limit)
|
def gen_pager_purecss(cat_slug, page_num, current):
    '''Generate a purecss pager (first / prev / pages / next / last) as HTML.

    :param cat_slug: URL prefix for the category.
    :param page_num: total number of pages.
    :param current: the 1-based current page.
    :return: the pager HTML, or an empty string when there is only one page.
    '''
    if page_num == 1:
        return ''
    pager_shouye = '''<li class="pure-menu-item {0}">
            <a class="pure-menu-link" href="{1}"><< 首页</a></li>'''.format('hidden' if current <= 1 else '', cat_slug)
    pager_pre = '''<li class="pure-menu-item {0}">
            <a class="pure-menu-link" href="{1}/{2}">< 前页</a>
            </li>'''.format('hidden' if current <= 1 else '', cat_slug, current - 1)
    pager_mid = ''
    for ind in range(0, page_num):
        tmp_mid = '''<li class="pure-menu-item {0}">
            <a class="pure-menu-link" href="{1}/{2}">{2}</a></li>
            '''.format('selected' if ind + 1 == current else '', cat_slug, ind + 1)
        pager_mid += tmp_mid
    pager_next = '''<li class="pure-menu-item {0}">
            <a class="pure-menu-link" href="{1}/{2}">后页 ></a>
            </li> '''.format('hidden' if current >= page_num else '', cat_slug, current + 1)
    # BUG FIX: the "last page" link previously rendered `hclass="..."` and
    # `ref="..."` instead of `class=` and `href=`, producing an unstyled,
    # dead link.
    pager_last = '''<li class="pure-menu-item {0}">
            <a class="pure-menu-link" href="{1}/{2}">末页
            >></a>
            </li> '''.format('hidden' if current >= page_num else '', cat_slug, page_num)
    return pager_shouye + pager_pre + pager_mid + pager_next + pager_last
|
def _rpartition ( entity , sep ) :
"""Python2.4 doesn ' t have an rpartition method so we provide
our own that mimics str . rpartition from later releases .
Split the string at the last occurrence of sep , and return a
3 - tuple containing the part before the separator , the separator
itself , and the part after the separator . If the separator is not
found , return a 3 - tuple containing two empty strings , followed
by the string itself ."""
|
idx = entity . rfind ( sep )
if idx == - 1 :
return '' , '' , entity
return entity [ : idx ] , sep , entity [ idx + 1 : ]
|
def batch_step(self, batch_idx=None):
    """Update the learning rate for batch index ``batch_idx``.

    When ``batch_idx`` is None, an internal counter is advanced and used
    to keep track of the index instead.
    """
    if batch_idx is None:
        batch_idx = self.last_batch_idx + 1
    self.last_batch_idx = batch_idx
    new_lrs = self.get_lr()
    for group, new_lr in zip(self.optimizer.param_groups, new_lrs):
        group['lr'] = new_lr
|
def set_metadata_value(metadata_source, key: str, value: typing.Any) -> None:
    """Set the metadata value for the given key.

    There are a set of predefined keys that, when used, will be type checked
    and be interoperable with other applications. Please consult reference
    documentation for valid keys.

    If using a custom key, we recommend structuring your keys in the
    '<group>.<attribute>' format followed by the predefined keys, e.g.
    'session.instrument' or 'camera.binning'.

    Also note that some predefined keys map to the metadata ``dict`` but
    others do not. For this reason, prefer using the ``metadata_value``
    methods over directly accessing ``metadata``.

    Raises KeyError when the key appears in neither key map.
    """
    # Session keys and regular keys are written identically, just to a
    # different attribute of the source; dispatch to a shared helper
    # instead of duplicating the nested-dict walk twice.
    desc = session_key_map.get(key)
    if desc is not None:
        _set_value_at_path(metadata_source, "session_metadata", desc['path'], value)
        return
    desc = key_map.get(key)
    if desc is not None:
        _set_value_at_path(metadata_source, "metadata", desc['path'], value)
        return
    raise KeyError()

def _set_value_at_path(metadata_source, attr_name: str, path, value) -> None:
    """Write *value* into the nested dict attribute *attr_name* at *path*."""
    d0 = getattr(metadata_source, attr_name, dict())
    d = d0
    for k in path[:-1]:
        d = d.setdefault(k, dict()) if d is not None else None
    if d is not None:
        d[path[-1]] = value
    setattr(metadata_source, attr_name, d0)
|
def main(handwriting_datasets_file, analyze_features):
    """Start the creation of the wanted metric.

    :param handwriting_datasets_file: path to a pickle file containing a
        'handwriting_datasets' entry.
    :param analyze_features: when truthy, dump per-feature CSV analyses
        before running the configured metrics.
    """
    # Load from pickled file
    logging.info("Start loading data '%s' ...", handwriting_datasets_file)
    # BUG FIX: the file was opened in text mode and never closed; pickle
    # requires binary mode on Python 3 and the handle should be released
    # deterministically.
    with open(handwriting_datasets_file, 'rb') as f:
        loaded = pickle.load(f)
    raw_datasets = loaded['handwriting_datasets']
    logging.info("%i datasets loaded.", len(raw_datasets))
    logging.info("Start analyzing...")
    if analyze_features:
        featurelist = [(features.AspectRatio(), "aspect_ratio.csv"),
                       (features.ReCurvature(1), "re_curvature.csv"),
                       (features.Height(), "height.csv"),
                       (features.Width(), "width.csv"),
                       (features.Time(), "time.csv"),
                       (features.Ink(), "ink.csv"),
                       (features.StrokeCount(), "stroke-count.csv")]
        for feat, filename in featurelist:
            logging.info("create %s...", filename)
            analyze_feature(raw_datasets, feat, filename)
    # Analyze everything specified in configuration
    cfg = utils.get_project_configuration()
    if 'data_analyzation_queue' in cfg:
        metrics = dam.get_metrics(cfg['data_analyzation_queue'])
        for metric in metrics:
            logging.info("Start metric %s...", str(metric))
            metric(raw_datasets)
    else:
        logging.info("No 'data_analyzation_queue' in ~/.hwrtrc")
|
def function_parser(function, parser):
    """Parse *function*'s signature and add its arguments to *parser*.

    The function itself is stored as the parser's ``func`` default so it
    can be invoked after argument parsing.
    """
    # Store the function pointer on the parser for later use
    parser.set_defaults(func=function)
    # Get the help text and parse it for params
    help_text = inspect.getdoc(function)
    main_text, params_help = parser_help_text(help_text)
    # BUG FIX: inspect.getargspec was deprecated since 3.0 and removed in
    # Python 3.11; getfullargspec is a drop-in replacement for the fields
    # used here (args and defaults).
    spec = inspect.getfullargspec(function)
    args = spec.args or []
    defaults = spec.defaults or []
    # If the function is a class method, drop the implicit 'self' parameter
    if args and args[0] == 'self':
        args = args[1:]
    # Work out whether the argument has a default by subtracting the length
    # of the default args from the number of arguments
    num_required_args = len(args) - len(defaults)
    for idx, arg in enumerate(args):
        if idx < num_required_args:
            arg_name, arg_params = calculate_default_type(arg, False, None, params_help)
        else:
            default_value = defaults[idx - num_required_args]
            arg_name, arg_params = calculate_default_type(arg, True, default_value, params_help)
        parser.add_argument(arg_name, **arg_params)
|
async def reset_type_codec(self, typename, *, schema='public'):
    """Reset the codec for *typename* to the default implementation.

    :param typename:
        Name of the data type the codec is for.
    :param schema:
        Schema name of the data type the codec is for
        (defaults to ``'public'``).

    .. versionadded:: 0.12.0
    """
    typeinfo = await self.fetchrow(introspection.TYPE_BY_NAME, typename, schema)
    if not typeinfo:
        raise ValueError('unknown type: {}.{}'.format(schema, typename))
    settings = self._protocol.get_settings()
    settings.remove_python_codec(typeinfo['oid'], typename, schema)
    # Cached prepared statements may still reference the old codec, so the
    # local statement cache must be invalidated.
    self._drop_local_statement_cache()
|
def help_function(self, command=None):
    """Show help for a single command, or list all available commands."""
    if not command:
        # No command requested: list everything that is registered.
        return ', '.join(sorted(self.registered))
    return self.registered[command].get('description', 'No help available')
|
def getOverlayDualAnalogTransform(self, ulOverlay, eWhich):
    """Get the analog-input-to-Dual-Analog coordinate scale for the overlay.

    Returns a tuple of (result code, center vector, radius).
    """
    fn = self.function_table.getOverlayDualAnalogTransform
    center = HmdVector2_t()
    radius = c_float()
    # The C API writes the outputs through pointers.
    error = fn(ulOverlay, eWhich, byref(center), byref(radius))
    return error, center, radius.value
|
def _index_audio_ibm(self, basename=None, replace_already_indexed=False, continuous=True, model="en-US_BroadbandModel", word_confidence=True, word_alternatives_threshold=0.9, profanity_filter_for_US_results=False):
    """Implements a search-suitable interface for the Watson speech API.

    Some explanation of the parameters here has been taken from [1]_

    Parameters
    ----------
    basename : str, optional
        A specific basename to be indexed and is placed in src_dir,
        e.g. `audio.wav`.
        If `None` is selected, all the valid audio files would be indexed.
        Default is `None`.
    replace_already_indexed : bool
        `True`, to reindex some audio file that's already in the
        timestamps.
        Default is `False`.
    continuous : bool
        Indicates whether multiple final results that represent consecutive
        phrases separated by long pauses are returned.
        If true, such phrases are returned; if false (the default),
        recognition ends after the first end-of-speech (EOS) incident is
        detected.
        Default is `True`.
    model : {'ar-AR_BroadbandModel', 'en-UK_BroadbandModel',
             'en-UK_NarrowbandModel', 'en-US_BroadbandModel' (the default),
             'en-US_NarrowbandModel', 'es-ES_BroadbandModel',
             'es-ES_NarrowbandModel', 'fr-FR_BroadbandModel',
             'ja-JP_BroadbandModel', 'ja-JP_NarrowbandModel',
             'pt-BR_BroadbandModel', 'pt-BR_NarrowbandModel',
             'zh-CN_BroadbandModel', 'zh-CN_NarrowbandModel'}
        The identifier of the model to be used for the recognition.
        Default is 'en-US_BroadbandModel'.
    word_confidence : bool
        Indicates whether a confidence measure in the range of 0 to 1 is
        returned for each word.
        The default is True. (It's False in the original API.)
    word_alternatives_threshold : numeric
        A confidence value that is the lower bound for identifying a
        hypothesis as a possible word alternative (also known as
        "Confusion Networks"). An alternative word is considered if its
        confidence is greater than or equal to the threshold. Specify a
        probability between 0 and 1 inclusive.
        Default is `0.9`.
    profanity_filter_for_US_results : bool
        Indicates whether profanity filtering is performed on the
        transcript. If true, the service filters profanity from all output
        by replacing inappropriate words with a series of asterisks.
        If false, the service returns results with no censoring. Applies
        to US English transcription only.
        Default is `False`.

    References
    ----------
    .. [1] : https://ibm.com/watson/developercloud/speech-to-text/api/v1/
    """
    # Query parameters forwarded verbatim to the Watson recognize endpoint;
    # inactivity_timeout of -1 disables the server-side silence timeout.
    params = {'continuous': continuous, 'model': model, 'word_alternatives_threshold': word_alternatives_threshold, 'word_confidence': word_confidence, 'timestamps': True, 'inactivity_timeout': str(-1), 'profanity_filter': profanity_filter_for_US_results}
    self._prepare_audio(basename=basename, replace_already_indexed=replace_already_indexed)
    for staging_audio_basename in self._list_audio_files(sub_dir="staging"):
        # Strip the extension and the 3-character staging suffix to recover
        # the original audio name.
        original_audio_name = ''.join(staging_audio_basename.split('.')[:-1])[:-3]
        with open("{}/staging/{}".format(self.src_dir, staging_audio_basename), "rb") as f:
            if self.get_verbosity():
                print("Uploading {}...".format(staging_audio_basename))
            # One synchronous HTTP POST per staged file.
            response = requests.post(url=("https://stream.watsonplatform.net/" "speech-to-text/api/v1/recognize"), auth=(self.get_username_ibm(), self.get_password_ibm()), headers={'content-type': 'audio/wav'}, data=f.read(), params=params)
            if self.get_verbosity():
                print("Indexing {}...".format(staging_audio_basename))
            # Accumulate per-chunk timestamps under the original file name.
            self.__timestamps_unregulated[original_audio_name + ".wav"].append(self._timestamp_extractor_ibm(staging_audio_basename, json.loads(response.text)))
            if self.get_verbosity():
                print("Done indexing {}".format(staging_audio_basename))
    # Merge the per-chunk timestamps into the final regulated structure.
    self._timestamp_regulator()
    if self.get_verbosity():
        print("Indexing procedure finished")
|
def del_calculation(job_id, confirmed=False):
    """Delete a calculation and all associated outputs."""
    if logs.dbcmd('get_job', job_id) is None:
        print('There is no job %d' % job_id)
        return
    question = ('Are you sure you want to (abort and) delete this calculation and '
                'all associated outputs?\nThis action cannot be undone. (y/n): ')
    if not (confirmed or confirm(question)):
        return
    try:
        abort(job_id)
        resp = logs.dbcmd('del_calc', job_id, getpass.getuser())
    except RuntimeError as err:
        safeprint(err)
    else:
        if 'success' in resp:
            print('Removed %d' % job_id)
        else:
            print(resp['error'])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.