signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def encode_sentence(sentence):
    """Serialize one sentence object into a ``<sentence>`` XML element.

    Children are emitted in order: infons, offset, optional text, then
    the annotation and relation sub-elements.
    """
    node = etree.Element('sentence')
    encode_infons(node, sentence.infons)
    offset_el = etree.SubElement(node, 'offset')
    offset_el.text = str(sentence.offset)
    if sentence.text:
        text_el = etree.SubElement(node, 'text')
        text_el.text = sentence.text
    node.extend(encode_annotation(a) for a in sentence.annotations)
    node.extend(encode_relation(r) for r in sentence.relations)
    return node
def fwd(self, astr_startPath, **kwargs):
    """Collect the files in *astr_startPath* into ``self.l_fwd``.

    Changes directory to the given path; if the change succeeds, every
    file in the new cwd is appended to ``self.l_fwd`` as a full path in
    treeRecurse-compatible format.

    :param astr_startPath: directory to descend into
    :return: dict with the cd 'status' and the resulting 'cwd'
    """
    status = self.cd(astr_startPath)['status']
    if status:
        # Build '<cwd>/<file>' entries; extend() replaces the manual
        # append loop and the ambiguous single-letter locals (PEP 8 E741).
        files = self.lsf()
        cwd = self.cwd()
        self.l_fwd.extend(cwd + '/' + f for f in files)
    return {'status': status, 'cwd': self.cwd()}
def is_authorized(self, request):
    """Check if the user is authenticated for the given request.

    Requests outside the include paths, or inside the exclude paths, are
    allowed through unconditionally.  Everything else must carry a Basic
    Authorization header matching the configured credentials.
    """
    if not self._is_request_in_include_path(request):
        return True
    if self._is_request_in_exclude_path(request):
        return True
    auth = request.authorization
    if not auth or auth[0] != 'Basic':
        return False
    credentials = b64decode(auth[1]).decode('UTF-8')
    username, password = credentials.split(':', 1)
    return self._users.get(username) == password
def _process_pair(first_fn, second_fn, error_protocol):
    """Look at two filenames, decide which is metadata and which is the
    ebook data file, and try to pair them.

    :param first_fn: first filename of the pair
    :param second_fn: second filename of the pair
    :param error_protocol: collector passed to the meta-file parser
    :return: list containing one DataPair, or the individual parsed
        files when no meta/data pairing is possible
    """
    # BUG FIX: logger calls below now pass lazy %-args instead of
    # pre-formatting the message with the % operator.
    if _is_meta(first_fn) and not _is_meta(second_fn):
        # 1st meta, 2nd data
        logger.debug("Parsed: '%s' as meta, '%s' as data.", first_fn, second_fn)
        metadata, ebook = first_fn, second_fn
    elif not _is_meta(first_fn) and _is_meta(second_fn):
        # 1st data, 2nd meta
        logger.debug("Parsed: '%s' as meta, '%s' as data.", second_fn, first_fn)
        metadata, ebook = second_fn, first_fn
    elif _is_meta(first_fn) and _is_meta(second_fn):
        # both metadata - nothing to pair, return both parsed
        logger.debug("Parsed: both '%s' and '%s' as meta.", first_fn, second_fn)
        return [_safe_read_meta_file(first_fn, error_protocol),
                _safe_read_meta_file(second_fn, error_protocol)]
    else:
        # both data - nothing to pair, return both wrapped
        logger.debug("Parsed: both '%s' and '%s' as data.", first_fn, second_fn)
        return [EbookFile(first_fn), EbookFile(second_fn)]

    # Process the pair created in the first two branches above.
    pair = DataPair(metadata_file=_safe_read_meta_file(metadata, error_protocol),
                    ebook_file=EbookFile(ebook))
    if not pair.metadata_file:
        logger.error("Can't parse MetadataFile '%s'. Continuing with data file '%s'.",
                     metadata, ebook)
        return [pair.ebook_file]
    return [pair]
def resolve(self, year: int = YEAR_ANY) -> MDate:
    """Return the concrete date in the given solar/lunar year.

    :param year: target year; falls back to ``self.year`` when unset
    :return: the resolved date object
    :raises ValueError: when no year is available at all
    """
    target = year or self.year
    if not target:
        raise ValueError('Unable resolve the date without a specified year.')
    return self._resolve(target)
def on_mouse_wheel(self, event):
    '''handle mouse wheel zoom changes'''
    state = self.state
    if not state.can_zoom:
        return
    # Remember which image pixel is under the cursor before zooming.
    mousepos = self.image_coordinates(event.GetPosition())
    # Rotation in whole wheel notches; sign gives the direction.
    rotation = event.GetWheelRotation() / event.GetWheelDelta()
    oldzoom = self.zoom
    if rotation > 0:
        # Zoom in: effectively multiply by 1.1 per notch.
        self.zoom /= 1.0 / (1.1 * rotation)
    elif rotation < 0:
        # Zoom out: divide by 1.1 per notch.
        self.zoom /= 1.1 * (-rotation)
    # Clamp zoom to the [0.1, 10] range.
    if self.zoom > 10:
        self.zoom = 10
    elif self.zoom < 0.1:
        self.zoom = 0.1
    # Snap to 1:1 whenever a zoom step crosses 100% in either direction.
    if oldzoom < 1 and self.zoom > 1:
        self.zoom = 1
    if oldzoom > 1 and self.zoom < 1:
        self.zoom = 1
    # Never zoom out beyond fitting the whole image into the window.
    client_area = state.frame.GetClientSize()
    fit_window_zoom_level = min(float(client_area.x) / self.img.GetWidth(), float(client_area.y) / self.img.GetHeight())
    if self.zoom < fit_window_zoom_level:
        self.zoom = fit_window_zoom_level
    self.need_redraw = True
    new = self.image_coordinates(event.GetPosition())
    # adjust dragpos so the zoom doesn't change what pixel is under the mouse
    self.dragpos = wx.Point(self.dragpos.x - (new.x - mousepos.x), self.dragpos.y - (new.y - mousepos.y))
    self.limit_dragpos()
def is_local(path):
    # type: (str) -> bool
    """Return True if *path* lives under ``sys.prefix``.

    Outside of a virtualenv every path is considered "local".
    """
    if running_under_virtualenv():
        prefix = normalize_path(sys.prefix)
        return normalize_path(path).startswith(prefix)
    return True
def builtin_lookup(name):
    """Look up *name* in the builtin module.

    Returns the astroid for the builtin module together with the tuple
    or list of matching statements (empty for ``__dict__`` or for names
    that are not defined there).
    """
    builtin_astroid = MANAGER.ast_from_module(builtins)
    if name == "__dict__":
        return builtin_astroid, ()
    # dict.get replaces the original try/except KeyError.
    stmts = builtin_astroid.locals.get(name, ())
    return builtin_astroid, stmts
def run(self, directories=None):
    """Find and run the specs.

    :param directories: directories to scan; defaults to the cwd
    :return: tuple of (successes, failures, skipped) counts
    """
    if directories is None:
        directories = [os.getcwd()]
    totals = [0, 0, 0]
    for directory in directories:
        outcome = self.execute(self.find_specs(directory))
        totals = [t + o for t, o in zip(totals, outcome)]
    self.formatter.finalize()
    return tuple(totals)
def register_image(kwargs=None, call=None):
    '''Create an AMI from a snapshot

    CLI Example:

    .. code-block:: bash

        salt-cloud -f register_image my-ec2-config ami_name=my_ami description="my description"
            root_device_name=/dev/xvda snapshot_id=snap-xxxxx
    '''
    if call != 'function':
        # BUG FIX: this message previously named 'create_volume', which
        # misled users about which function had been misinvoked.
        log.error('The register_image function must be called with -f or --function.')
        return False
    if 'ami_name' not in kwargs:
        log.error('ami_name must be specified to register an image.')
        return False
    block_device_mapping = kwargs.get('block_device_mapping', None)
    if not block_device_mapping:
        # Without an explicit mapping we need enough info to build one.
        if 'snapshot_id' not in kwargs:
            log.error('snapshot_id or block_device_mapping must be specified to register an image.')
            return False
        if 'root_device_name' not in kwargs:
            log.error('root_device_name or block_device_mapping must be specified to register an image.')
            return False
        block_device_mapping = [{
            'DeviceName': kwargs['root_device_name'],
            'Ebs': {
                'VolumeType': kwargs.get('volume_type', 'gp2'),
                'SnapshotId': kwargs['snapshot_id'],
            },
        }]
    if not isinstance(block_device_mapping, list):
        block_device_mapping = [block_device_mapping]
    params = {'Action': 'RegisterImage', 'Name': kwargs['ami_name']}
    params.update(_param_from_config('BlockDeviceMapping', block_device_mapping))
    # Optional parameters are passed through only when supplied.
    if 'root_device_name' in kwargs:
        params['RootDeviceName'] = kwargs['root_device_name']
    if 'description' in kwargs:
        params['Description'] = kwargs['description']
    if 'virtualization_type' in kwargs:
        params['VirtualizationType'] = kwargs['virtualization_type']
    if 'architecture' in kwargs:
        params['Architecture'] = kwargs['architecture']
    log.debug(params)
    data = aws.query(params, return_url=True, return_root=True, location=get_location(), provider=get_provider(), opts=__opts__, sigver='4')
    # Flatten the list of single-key dicts into one result dict.
    r_data = {}
    for d in data[0]:
        for k, v in d.items():
            r_data[k] = v
    return r_data
def port_id_from_name(port_name, device_id, nodes):
    """Get the port ID when given a port name.

    :param str port_name: port name
    :param str device_id: device ID
    :param list nodes: list of nodes from :py:meth:`generate_nodes`
    :return: port ID, or None when the device or port is not found
    :rtype: int
    """
    for node in nodes:
        if node['id'] != device_id:
            continue
        # Only the first node matching the device is examined.
        for port in node['ports']:
            if port['name'] == port_name:
                return port['id']
        return None
    return None
def state_fidelity(state0: State, state1: State) -> bk.BKTensor:
    """Return the quantum fidelity between two pure states."""
    # Both states must be defined over the same qubits.
    assert state0.qubits == state1.qubits  # FIXME
    overlap = bk.inner(state0.tensor, state1.tensor)
    return bk.absolute(overlap) ** bk.fcast(2)
def default_gateway():
    '''Populates grains which describe whether a server has a default gateway
    configured or not. Uses `ip -4 route show` and `ip -6 route show` and greps
    for a `default` at the beginning of any line. Assuming the standard
    `default via <ip>` format for default gateways, it will also parse out the
    ip address of the default gateway, and put it in ip4_gw or ip6_gw.

    If the `ip` command is unavailable, no grains will be populated.

    Currently does not support multiple default gateways. The grains will be
    set to the first default gateway found.

    List of grains:

        ip4_gw: True  # ip/True/False if default ipv4 gateway
        ip6_gw: True  # ip/True/False if default ipv6 gateway
        ip_gw: True   # True if either of the above is True, False otherwise
    '''
    grains = {}
    ip_bin = salt.utils.path.which('ip')
    # Without the `ip` utility there is nothing we can detect.
    if not ip_bin:
        return {}
    grains['ip_gw'] = False
    grains['ip4_gw'] = False
    grains['ip6_gw'] = False
    for ip_version in ('4', '6'):
        try:
            out = __salt__['cmd.run']([ip_bin, '-' + ip_version, 'route', 'show'])
            for line in out.splitlines():
                if line.startswith('default'):
                    grains['ip_gw'] = True
                    grains['ip{0}_gw'.format(ip_version)] = True
                    try:
                        # Expect `default via <ip> ...`; take tokens 2 and 3.
                        via, gw_ip = line.split()[1:3]
                    except ValueError:
                        # Fewer than three tokens - keep the boolean grain.
                        pass
                    else:
                        if via == 'via':
                            # Upgrade the boolean to the actual gateway IP.
                            grains['ip{0}_gw'.format(ip_version)] = gw_ip
                    # Only the first default route per family is considered.
                    break
        except Exception:
            # Best-effort: a failing `ip` invocation leaves the grains False.
            continue
    return grains
def sparse_surface(self):
    """Filled cells on the surface of the mesh.

    Returns
    -------
    voxels : (n, 3) int
        Filled cells on the mesh surface.
    """
    # Dispatch table replaces the original if/elif chain.
    methods = {'ray': voxelize_ray, 'subdivide': voxelize_subdivide}
    try:
        func = methods[self._method]
    except KeyError:
        raise ValueError('voxelization method incorrect')
    voxels, origin = func(mesh=self._data['mesh'],
                          pitch=self._data['pitch'],
                          max_iter=self._data['max_iter'][0])
    self._cache['origin'] = origin
    return voxels
def renumerate_stages(pipeline):
    """Renumber Pipeline Stage reference IDs to account for dependencies.

    Stage order is defined in the templates; the ``refId`` field dictates
    whether a stage is mainline or parallel:

    * ``master`` - a mainline required stage; later stages depend on it.
    * ``branch`` - a stage that runs in parallel to master stages.
    * ``merge`` - a stage that is parallel but other stages still depend
      on it.

    Args:
        pipeline (dict): Completed Pipeline ready for renumeration.

    Returns:
        dict: Pipeline ready to be sent to Spinnaker.
    """
    main_index = 0
    branch_index = 0
    previous_refid = ''
    for stage in pipeline['stages']:
        current_refid = stage['refId'].lower()
        if current_refid == 'master':
            # The first master has no prerequisites; later masters depend
            # on the previous master stage.
            stage['requisiteStageRefIds'] = [] if main_index == 0 else [str(main_index)]
            main_index += 1
            stage['refId'] = str(main_index)
        elif current_refid == 'branch':
            # Consecutive branches get increasing indices so multiple
            # parallel stages keep unique refIds.
            branch_index = branch_index + 1 if previous_refid == 'branch' else 0
            stage['refId'] = str((main_index * 100) + branch_index)
            stage['requisiteStageRefIds'] = [str(main_index)]
        elif current_refid == 'merge':
            # TODO: Add logic to handle merge stages.
            pass
        previous_refid = current_refid
        LOG.debug('step=%(name)s\trefId=%(refId)s\t'
                  'requisiteStageRefIds=%(requisiteStageRefIds)s', stage)
    return pipeline
def combine(files: List[str], output_file: str, key: str = None, file_attrs: Dict[str, str] = None, batch_size: int = 1000, convert_attrs: bool = False) -> None:
    """Combine two or more loom files and save as a new loom file

    Args:
        files: the list of input files (full paths)
        output_file: full path of the output loom file
        key: row attribute to use to verify row ordering
        file_attrs: file attributes (title, description, url, etc.)
        batch_size: limits the batch of cols/rows read in memory (default: 1000)
        convert_attrs: convert file attributes that differ between files
            into column attributes

    Returns:
        Nothing, but creates a new loom file combining the input files.

    The loom files must have exactly the same number of rows and exactly
    the same column attributes.  Named layers not present in the first
    file are discarded.

    .. warning::
        Without a ``key`` argument the files are combined without changing
        the ordering of rows or columns, and row attributes are taken from
        the first file; if rows are not in the same order in all files the
        result may be meaningless.  To guard against this, provide a
        ``key`` naming a row attribute with a unique value per row, e.g.::

            import loompy
            loompy.combine(files, key="Accession")
    """
    if file_attrs is None:
        file_attrs = {}
    if len(files) == 0:
        raise ValueError("The input file list was empty")
    # The output starts as a copy of the first input file.
    copyfile(files[0], output_file)
    ds = connect(output_file)
    try:
        for a in file_attrs:
            ds.attrs[a] = file_attrs[a]
        # Merge the remaining files onto the copy (no-op for one file).
        for f in files[1:]:
            ds.add_loom(f, key, batch_size=batch_size, convert_attrs=convert_attrs)
    finally:
        # BUG FIX: the output file was previously left open when merging
        # raised an exception.
        ds.close()
def actual_causation():
    """The actual causation example network, consisting of an ``OR`` and
    an ``AND`` gate with self-loops."""
    tpm = np.array([
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 1],
    ])
    # Fully-connected 2-node connectivity matrix (self-loops included).
    cm = np.array([[1, 1], [1, 1]])
    return Network(tpm, cm, node_labels=('OR', 'AND'))
def color_ramp(number_of_colour):
    """Generate a list of colors in hexadecimal.

    Colors are generated with the HSL model by sweeping the hue through a
    third of the spectrum, see:
    https://coderwall.com/p/dvsxwg/smoothly-transition-from-green-to-red

    :param number_of_colour: The number of intervals between R and G spectrum.
    :type number_of_colour: int

    :returns: List of colors.
    :rtype: list

    :raises ValueError: if ``number_of_colour`` is less than 1.
    """
    if number_of_colour < 1:
        # BUG FIX: raise a specific exception type instead of bare Exception
        # (ValueError subclasses Exception, so existing handlers still work).
        raise ValueError('The number of colours should be > 0')
    if number_of_colour == 1:
        hue_interval = 1
    else:
        hue_interval = 1.0 / (number_of_colour - 1)
    # NOTE(review): lightness/saturation are far outside colorsys' usual
    # 0..1 range; these magic values make hls_to_rgb emit components on a
    # 0..255 scale directly - confirm before changing them.
    light = 127.5
    saturation = -1.007905138339921
    colors = []
    for i in range(number_of_colour):
        hue = (i * hue_interval) / 3
        rgb = colorsys.hls_to_rgb(hue, light, saturation)
        colors.append('#%02x%02x%02x' % (int(rgb[0]), int(rgb[1]), int(rgb[2])))
    return colors
def _find_missing ( self , data , return_bool = False ) :
"""Parameters
data : pd . DataFrame ( )
Input dataframe .
return _ bool : bool
Returns
pd . DataFrame ( )""" | # This returns the full table with True where the condition is true
if return_bool == False :
data = self . _find_missing_return_frame ( data )
return data
# This returns a bool selector if any of the column is True
elif return_bool == "any" :
bool_sel = self . _find_missing_return_frame ( data ) . any ( axis = 0 )
return bool_sel
# This returns a bool selector if all of the column are True
elif return_bool == "all" :
bool_sel = self . _find_missing_return_frame ( data ) . all ( axis = 0 )
return bool_sel
else :
print ( "error in multi_col_how input" ) |
def resolve_job(self, name):
    """Attempt to resolve a task name into a job name.

    Each registered job resolver is tried in turn; the first non-None
    result wins.  If every resolver returns None, so does this method.

    Keyword arguments:
    name -- Name of the task to be resolved.
    """
    candidates = (resolver(name) for resolver in self.job_resolvers())
    return next((job for job in candidates if job is not None), None)
def split_by_percent(self, spin_systems_list):
    """Split a list of spin systems by the configured percentages.

    :param list spin_systems_list: List of spin systems.
    :return: Spin systems divided into sub-lists corresponding to the
        split percentages in ``self.plsplit``.
    :rtype: :py:class:`list`
    """
    total = len(spin_systems_list)
    chunk_sizes = [int((percent * total) / 100) for percent in self.plsplit]
    shortfall = total - sum(chunk_sizes)
    if shortfall > 0:
        # Integer truncation can drop a few items; hand them to the
        # smallest chunk.
        chunk_sizes[chunk_sizes.index(min(chunk_sizes))] += shortfall
    assert sum(chunk_sizes) == total, "sum of chunk sizes must be equal to spin systems list length."
    intervals = self.calculate_intervals(chunk_sizes)
    return [itertools.islice(spin_systems_list, *interval) for interval in intervals]
def _delete(self, pk):
    """Delete function logic, override to implement diferent logic.

    Deletes the record with primary_key = pk, together with the
    permission/view-menu entries that referenced it.

    :param pk:
        record primary key to delete
    """
    item = self.datamodel.get(pk, self._base_filters)
    if not item:
        abort(404)
    try:
        # Give subclasses a chance to veto the deletion.
        self.pre_delete(item)
    except Exception as e:
        flash(str(e), 'danger')
    else:
        # Collect the permission-view entries tied to this item so they
        # can be cleaned up after a successful delete.
        view_menu = security_manager.find_view_menu(item.get_perm())
        pvs = security_manager.get_session.query(security_manager.permissionview_model).filter_by(view_menu=view_menu).all()
        schema_view_menu = None
        if hasattr(item, 'schema_perm'):
            # Datasource-like items also carry a schema-level permission.
            schema_view_menu = security_manager.find_view_menu(item.schema_perm)
            pvs.extend(security_manager.get_session.query(security_manager.permissionview_model).filter_by(view_menu=schema_view_menu).all())
        if self.datamodel.delete(item):
            self.post_delete(item)
            # Remove the now-dangling permission entries and view menus.
            for pv in pvs:
                security_manager.get_session.delete(pv)
            if view_menu:
                security_manager.get_session.delete(view_menu)
            if schema_view_menu:
                security_manager.get_session.delete(schema_view_menu)
            security_manager.get_session.commit()
        flash(*self.datamodel.message)
        self.update_redirect()
def _pull_schema_definition(self, fname):
    """Download the ontology definition from the web and cache it locally.

    :param fname: local path the downloaded definition is written to
    """
    # BUG FIX: manage both handles so neither the HTTP response nor the
    # cache file is leaked when reading or writing raises.
    # NOTE(review): on Python 3 urlopen().read() returns bytes, which
    # would require mode "wb" - confirm which interpreter this targets.
    response = urlopen(self._ontology_file)
    try:
        with open(fname, "w+") as cached_std:
            cached_std.write(response.read())
    finally:
        response.close()
def _send_upstream(queue, client, codec, batch_time, batch_size, req_acks, ack_timeout, retry_options, stop_event, log_messages_on_error=ASYNC_LOG_MESSAGES_ON_ERROR, stop_timeout=ASYNC_STOP_TIMEOUT_SECS, codec_compresslevel=None):
    """Private method to manage producing messages asynchronously

    Listens on the queue for a specified number of messages or until
    a specified timeout and then sends messages to the brokers in grouped
    requests (one per broker).

    Messages placed on the queue should be tuples that conform to this
    format: ((topic, partition), message, key)

    Currently does not mark messages with task_done. Do not attempt to
    join()!

    Arguments:
        queue (threading.Queue): the queue from which to get messages
        client (KafkaClient): instance to use for communicating with brokers
        codec (kafka.protocol.ALL_CODECS): compression codec to use
        batch_time (int): interval in seconds to send message batches
        batch_size (int): count of messages that will trigger an immediate send
        req_acks: required acks to use with ProduceRequests. see server protocol
        ack_timeout: timeout to wait for required acks. see server protocol
        retry_options (RetryOptions): settings for retry limits, backoff etc
        stop_event (threading.Event): event to monitor for shutdown signal.
            when this event is 'set', the producer will stop sending messages.
        log_messages_on_error (bool, optional): log stringified message-contents
            on any produce error, otherwise only log a hash() of the contents,
            defaults to True.
        stop_timeout (int or float, optional): number of seconds to continue
            retrying messages after stop_event is set, defaults to 30.
    """
    # Requests awaiting (re)send, mapped to their retry count.
    request_tries = {}

    # Connect to the brokers, backing off between attempts, until we
    # succeed or are told to stop.
    while not stop_event.is_set():
        try:
            client.reinit()
        except Exception as e:
            log.warn('Async producer failed to connect to brokers; backoff for %s(ms) before retrying', retry_options.backoff_ms)
            time.sleep(float(retry_options.backoff_ms) / 1000)
        else:
            break

    stop_at = None
    # Main loop: keep going while there is anything to drain or retry.
    while not (stop_event.is_set() and queue.empty() and not request_tries):
        # Handle stop_timeout
        if stop_event.is_set():
            if not stop_at:
                stop_at = stop_timeout + time.time()
            if time.time() > stop_at:
                log.debug('Async producer stopping due to stop_timeout')
                break

        timeout = batch_time
        count = batch_size
        send_at = time.time() + timeout
        msgset = defaultdict(list)

        # Merging messages will require a bit more work to manage correctly
        # for now, dont look for new batches if we have old ones to retry
        if request_tries:
            count = 0
            log.debug('Skipping new batch collection to handle retries')
        else:
            log.debug('Batching size: %s, timeout: %s', count, timeout)

        # Keep fetching till we gather enough messages or a
        # timeout is reached
        while count > 0 and timeout >= 0:
            try:
                topic_partition, msg, key = queue.get(timeout=timeout)
            except Empty:
                break

            # Check if the controller has requested us to stop
            if topic_partition == STOP_ASYNC_PRODUCER:
                stop_event.set()
                break

            # Adjust the timeout to match the remaining period
            count -= 1
            timeout = send_at - time.time()
            msgset[topic_partition].append((msg, key))

        # Send collected requests upstream
        for topic_partition, msg in msgset.items():
            # NOTE(review): `key` here is whatever the last queue.get()
            # unpacked; confirm this matches create_message_set's
            # expectations before changing.
            messages = create_message_set(msg, codec, key, codec_compresslevel)
            req = ProduceRequest(topic_partition.topic, topic_partition.partition, tuple(messages))
            request_tries[req] = 0

        if not request_tries:
            continue

        reqs_to_retry, error_cls = [], None
        retry_state = {'do_backoff': False, 'do_refresh': False}

        def _handle_error(error_cls, request):
            # Queue the request for retry and record which recovery
            # actions (backoff / metadata refresh) its error demands.
            if (issubclass(error_cls, RETRY_ERROR_TYPES) or (retry_options.retry_on_timeouts and issubclass(error_cls, RequestTimedOutError))):
                reqs_to_retry.append(request)
            if issubclass(error_cls, RETRY_BACKOFF_ERROR_TYPES):
                retry_state['do_backoff'] |= True
            if issubclass(error_cls, RETRY_REFRESH_ERROR_TYPES):
                retry_state['do_refresh'] |= True

        requests = list(request_tries.keys())
        log.debug('Sending: %s', requests)
        responses = client.send_produce_request(requests, acks=req_acks, timeout=ack_timeout, fail_on_error=False)
        log.debug('Received: %s', responses)

        # Inspect each response for failures and classify them.
        for i, response in enumerate(responses):
            error_cls = None
            if isinstance(response, FailedPayloadsError):
                error_cls = response.__class__
                orig_req = response.payload
            elif isinstance(response, ProduceResponse) and response.error:
                error_cls = kafka_errors.get(response.error, UnknownError)
                orig_req = requests[i]
            if error_cls:
                _handle_error(error_cls, orig_req)
                log.error('%s sending ProduceRequest (#%d of %d) ' 'to %s:%d with msgs %s', error_cls.__name__, (i + 1), len(requests), orig_req.topic, orig_req.partition, orig_req.messages if log_messages_on_error else hash(orig_req.messages))

        if not reqs_to_retry:
            # Full success: drop all tracked requests and fetch new batches.
            request_tries = {}
            continue

        # doing backoff before next retry
        if retry_state['do_backoff'] and retry_options.backoff_ms:
            log.warn('Async producer backoff for %s(ms) before retrying', retry_options.backoff_ms)
            time.sleep(float(retry_options.backoff_ms) / 1000)

        # refresh topic metadata before next retry
        if retry_state['do_refresh']:
            log.warn('Async producer forcing metadata refresh metadata before retrying')
            try:
                client.load_metadata_for_topics()
            except Exception as e:
                log.error("Async producer couldn't reload topic metadata. Error: `%s`", e.message)

        # Apply retry limit, dropping messages that are over
        request_tries = dict(
            (key, count + 1)
            for (key, count) in request_tries.items()
            if key in reqs_to_retry and (retry_options.limit is None or (count < retry_options.limit))
        )

        # Log messages we are going to retry
        for orig_req in request_tries.keys():
            log.info('Retrying ProduceRequest to %s:%d with msgs %s', orig_req.topic, orig_req.partition, orig_req.messages if log_messages_on_error else hash(orig_req.messages))

    # Loop exited: report anything left undelivered.
    if request_tries or not queue.empty():
        log.error('Stopped producer with {0} unsent messages'.format(len(request_tries) + queue.qsize()))
def result_tree_flat(context, cl, request):
    """Added 'filtered' param, so the template's js knows whether the
    results have been affected by a GET param or not.  Only when the
    results are not filtered you can drag and sort the tree."""
    rows = (th_for_result(cl, res) for res in list(cl.result_list))
    return {
        # 'filtered': is_filtered_cl(cl, request),
        'results': rows,
    }
def _convert_strls ( self , data ) :
"""Convert columns to StrLs if either very large or in the
convert _ strl variable""" | convert_cols = [ col for i , col in enumerate ( data ) if self . typlist [ i ] == 32768 or col in self . _convert_strl ]
if convert_cols :
ssw = StataStrLWriter ( data , convert_cols )
tab , new_data = ssw . generate_table ( )
data = new_data
self . _strl_blob = ssw . generate_blob ( tab )
return data |
def getReward(self):
    """Returns the reward corresponding to the last action performed."""
    t = self.env.market.period
    # Compute revenue minus costs.
    totalEarnings = 0.0
    for g in self.env.generators:
        # Compute costs in $ (not $/hr).
        costs = g.total_cost(round(g.p, 4), self.env._g0[g]["p_cost"], self.env._g0[g]["pcost_model"])
        # Offers/bids submitted for this generator in the last action.
        offbids = [ob for ob in self.env._lastAction if ob.generator == g]
        # Scale per-period revenue by the market period length.
        revenue = t * sum([ob.revenue for ob in offbids])
        if offbids:
            revenue += offbids[0].noLoadCost
        if g.is_load:
            # Dispatchable loads: earnings are costs avoided minus payment.
            earnings = costs - revenue
        else:
            earnings = revenue - costs
            # (fixedCost + variableCost)
        logger.debug("Generator [%s] earnings: %.2f (%.2f, %.2f)" % (g.name, earnings, revenue, costs))
        totalEarnings += earnings
    # Startup/shutdown costs incurred by online-status changes since the
    # previous call.
    onlineCosts = 0.0
    for i, g in enumerate(self.env.generators):
        if self._gOnline[i] and not g.online:
            onlineCosts += g.c_shutdown
        elif not self._gOnline[i] and g.online:
            onlineCosts += g.c_startup
    # Remember the current statuses for the next reward computation.
    self._gOnline = [g.online for g in self.env.generators]
    reward = totalEarnings - onlineCosts
    self.addReward(reward)
    logger.debug("Task reward: %.2f (%.2f - %.2f)" % (reward, totalEarnings, onlineCosts))
    return reward
def markdown_editor(selector):
    """Enable the markdown editor for the given textarea.

    :returns: Editor template context.
    """
    settings = dict(previewParserPath=reverse('django_markdown_preview'))
    return dict(
        selector=selector,
        extra_settings=mark_safe(simplejson.dumps(settings)),
    )
def _get_attribute_tensors ( onnx_model_proto ) : # type : ( ModelProto ) - > Iterable [ TensorProto ]
"""Create an iterator of tensors from node attributes of an ONNX model .""" | for node in onnx_model_proto . graph . node :
for attribute in node . attribute :
if attribute . HasField ( "t" ) :
yield attribute . t
for tensor in attribute . tensors :
yield tensor |
def _get_object_size(data, position, obj_end):
    """Validate and return a BSON document's size.

    :param data: raw buffer containing the document
    :param position: offset where the document starts
    :param obj_end: offset one past the end of the enclosing object
    :return: tuple of (document size, index of the trailing NUL byte)
    :raises InvalidBSON: when the size prefix or terminator is malformed
    """
    try:
        # The first 4 bytes hold the document size (decoded by the
        # module's _UNPACK_INT; BSON specifies little-endian int32).
        obj_size = _UNPACK_INT(data[position:position + 4])[0]
    except struct.error as exc:
        raise InvalidBSON(str(exc))
    end = position + obj_size - 1
    # Every BSON document must end with a NUL terminator ("end of object").
    if data[end:end + 1] != b"\x00":
        raise InvalidBSON("bad eoo")
    # The document must fit inside its container.
    if end >= obj_end:
        raise InvalidBSON("invalid object length")
    # If this is the top-level document, validate the total size too.
    if position == 0 and obj_size != obj_end:
        raise InvalidBSON("invalid object length")
    return obj_size, end
def send_message(self, msg):
    """Send any kind of message.

    Parameters
    ----------
    msg : Message object
        The message to send.
    """
    # Writing to the stream is only safe from the ioloop thread.
    assert get_thread_ident() == self.ioloop_thread_id
    data = str(msg) + "\n"
    # Log all sent messages here so no one else has to.
    if self._logger.isEnabledFor(logging.DEBUG):
        self._logger.debug("Sending to {}: {}".format(self.bind_address_string, repr(data)))
    if not self._connected.isSet():
        raise KatcpClientDisconnected('Not connected to device {0}'.format(self.bind_address_string))
    try:
        return self._stream.write(data)
    except Exception:
        # Best-effort: log the failure and tear the connection down
        # rather than propagating stream errors to the caller.
        self._logger.warn('Could not send message {0!r} to {1!r}'.format(str(msg), self._bindaddr), exc_info=True)
        self._disconnect(exc_info=True)
def get_config_input_source_config_source_running_running(self, **kwargs):
    """Auto Generated Code"""
    # Build get_config/input/source/config-source/running/running.
    root = ET.Element("get_config")
    parent = root
    for tag in ("input", "source", "config-source", "running", "running"):
        parent = ET.SubElement(parent, tag)
    callback = kwargs.pop('callback', self._callback)
    return callback(root)
def recv(self):
    """Receives a message from PS and decrypts it and returns a Message"""
    LOGGER.debug('Receiving')
    try:
        # First 4 bytes: total length as a big-endian int, which
        # includes the fixed communication header.
        message_length = struct.unpack('>i', self._socket.recv(4))[0]
        message_length -= Connection.COMM_LENGTH
        LOGGER.debug('Length: %i', message_length)
    except socket.timeout:
        # No data arrived within the socket timeout.
        return None
    # Next 4 bytes: communication status code.
    comm_status = struct.unpack('>i', self._socket.recv(4))[0]
    LOGGER.debug('Status: %i', comm_status)
    bytes_received = 0
    message = b""
    # Read the payload in chunks of up to 1024 bytes.
    # NOTE(review): socket.recv may return fewer bytes than requested;
    # this counter assumes full chunks - confirm against the peer.
    while bytes_received < message_length:
        if message_length - bytes_received >= 1024:
            recv_len = 1024
        else:
            recv_len = message_length - bytes_received
        bytes_received += recv_len
        LOGGER.debug('Received %i', bytes_received)
        message += self._socket.recv(recv_len)
    if comm_status == 0:
        # Status 0: well-formed encrypted payload.
        message = self._crypt.decrypt(message)
    else:
        # Non-zero status: surface the raw payload as an error message.
        return Message(len(message), Connection.COMM_ERROR, message)
    msg = Message(message_length, comm_status, message)
    return msg
def members(self):
    """Access the members.

    :returns: twilio.rest.chat.v1.service.channel.member.MemberList
    :rtype: twilio.rest.chat.v1.service.channel.member.MemberList
    """
    # Lazily construct the list the first time it is requested.
    if self._members is None:
        self._members = MemberList(
            self._version,
            service_sid=self._solution['service_sid'],
            channel_sid=self._solution['sid'],
        )
    return self._members
def upload(self, fileobj, tileset, name=None, patch=False, callback=None, bypass=False):
    """Upload data and create a Mapbox tileset.

    Effectively replicates the Studio upload feature.  Returns a
    Response object, the json() of which returns a dict with upload
    metadata.

    Parameters
    ----------
    fileobj : file object or str
        A filename or a Python file object opened in binary mode.
    tileset : str
        A tileset identifier such as '{owner}.my-tileset'.
    name : str
        A short name for the tileset that will appear in Mapbox studio.
    patch : bool
        Optional patch mode which requires a flag on the owner's account.
    bypass : bool
        Optional bypass validation mode for MBTiles which requires a
        flag on the owner's account.
    callback : func
        A function that takes a number of bytes processed as its sole
        argument.  May be used with a progress bar.

    Returns
    -------
    requests.Response
    """
    validated = self._validate_tileset(tileset)
    staged_url = self.stage(fileobj, callback=callback)
    return self.create(staged_url, validated, name=name, patch=patch, bypass=bypass)
def truncate(self, index, chain=-1):
    """Tell the traces to truncate themselves at the given index."""
    # Resolve a possibly-negative chain index against the number of chains.
    resolved_chain = range(self.chains)[chain]
    for trace_name in self.trace_names[resolved_chain]:
        self._traces[trace_name].truncate(index, resolved_chain)
def finalizePrivateLessonRegistration(sender, **kwargs):
    '''Once a private lesson registration is finalized, mark the slots that were
    used to book the private lesson as booked and associate them with the final
    registration.  No need to notify students in this instance because they are
    already receiving a notification of their registration.'''
    registration = kwargs.pop('registration')
    # Only event registrations tied to a private lesson event are relevant.
    lesson_regs = registration.eventregistration_set.filter(
        event__privatelessonevent__isnull=False)
    for event_reg in lesson_regs:
        event_reg.event.finalizeBooking(eventRegistration=event_reg,
                                        notifyStudent=False)
def _set_base_path_env():  # type: () -> None
    """Set the environment variable SAGEMAKER_BASE_DIR to
    ~/sagemaker_local/jobs/{timestamp}/opt/ml

    Returns:
        None
    """
    # FIX: the original docstring was truncated ("indicating whe"), claimed a
    # bool return, and omitted the 'jobs' path component actually used below.
    # Unique per-invocation job directory keyed by the current timestamp.
    local_config_dir = os.path.join(os.path.expanduser('~'), 'sagemaker_local',
                                    'jobs', str(time.time()), 'opt', 'ml')
    # Lazy %-style logger arguments instead of eager string interpolation.
    logger.info('Setting environment variable SAGEMAKER_BASE_DIR as %s .',
                local_config_dir)
    os.environ[BASE_PATH_ENV] = local_config_dir
def to_dict(self):
    """Convert to dictionary.

    :return (dict): A dict mapping from strings to vectors (plain lists).
    """
    # BUG FIX: ``dict.iteritems`` is Python 2 only; ``items`` works on both
    # Python 2 and 3 with identical iteration behaviour here.
    d = {}
    for word, idx in self.vocab.items():
        d[word] = self.array[idx].tolist()
    return d
def delete_package(path, packages):
    """Remove downloaded packages"""
    # Deletion only happens when the global "delete all" setting is enabled.
    if _meta_.del_all not in ["on", "ON"]:
        return
    for pkg in packages:
        os.remove(path + pkg)
def compute_ld(cur_geno, other_genotypes, r2=False):
    """Compute LD between a marker and a list of markers.

    Args:
        cur_geno (Genotypes): The genotypes of the marker.
        other_genotypes (list): A list of genotypes.

    Returns:
        numpy.array: An array containing the r or r**2 values between
        cur_geno and other_genotypes.

    Note:
        The genotypes will automatically be normalized using (x - mean) / std.
    """
    # Normalize the target marker and stack the normalized comparison
    # markers as columns of a single samples-by-markers matrix.
    norm_cur = normalize_genotypes(cur_geno)
    norm_others = np.stack(
        tuple(normalize_genotypes(g) for g in other_genotypes), axis=1,
    )
    # Both operands must describe the same number of samples.
    assert norm_cur.shape[0] == norm_others.shape[0]
    # Effective sample count per marker pair, excluding NaN entries.
    nb_samples = (
        ~np.isnan(norm_cur.reshape(norm_cur.shape[0], 1)) * ~np.isnan(norm_others)
    ).sum(axis=0)
    # Per-column correlation; NaNs are treated as zeros in the dot product.
    r = pd.Series(
        np.dot(np.nan_to_num(norm_cur), np.nan_to_num(norm_others) / nb_samples),
        index=[g.variant.name for g in other_genotypes],
        name="r2" if r2 else "r",
    )
    # Clamp numerical noise so correlations stay within [-1, 1].
    r.loc[r > 1] = 1
    r.loc[r < -1] = -1
    return r ** 2 if r2 else r
def add_selected(self, ):
    """Create a new reftrack with the selected element and type and add it to the root.

    :returns: None
    :rtype: None
    :raises: NotImplementedError
    """
    # Pick whichever browser belongs to the currently visible tab.
    if self.browser_tabw.currentIndex() == 1:
        browser = self.shot_browser
    else:
        browser = self.asset_browser
    element_sel = browser.selected_indexes(2)
    if not element_sel:
        return
    type_sel = browser.selected_indexes(3)
    if not type_sel:
        return
    element_index = element_sel[0]
    type_index = type_sel[0]
    # Both the element and the type selection must be valid model indexes.
    if not (element_index.isValid() and type_index.isValid()):
        return
    element = element_index.internalPointer().internal_data()
    typ = type_index.internalPointer().internal_data()[0]
    reftrack.Reftrack(self.root, self.refobjinter, typ=typ, element=element)
def publish(self, request: Request) -> None:
    """Dispatches a request. Expects zero or more target handlers.

    :param request: The request to dispatch
    :return: None.
    """
    # Instantiate each registered handler factory and let the resulting
    # handler process the request, in registration order.
    for handler_factory in self._registry.lookup(request):
        handler_factory().handle(request)
def padded_to_same_length(seq1, seq2, item=0):
    """Return a pair of sequences of the same length by padding the shorter
    sequence with ``item``.

    The padded sequence is a tuple. The unpadded sequence is returned as-is.
    """
    n1 = len(seq1)
    n2 = len(seq2)
    if n1 < n2:
        # seq1 is the shorter one: pad it on the right.
        return (cons.ed(seq1, yield_n(n2 - n1, item)), seq2)
    if n2 < n1:
        # seq2 is the shorter one: pad it on the right.
        return (seq1, cons.ed(seq2, yield_n(n1 - n2, item)))
    return (seq1, seq2)
def ring_number(self):
    """The number of the ring that has changed state,
    with 0 being the first ring.

    On tablets with only one ring, this method always returns 0.

    For events not of type
    :attr:`~libinput.constant.EventType.TABLET_PAD_RING`, this property
    raises :exc:`AttributeError`.

    Returns:
        int: The index of the ring that changed state.

    Raises:
        AttributeError
    """
    # DOC FIX: the docstring previously claimed AssertionError, but the code
    # raises AttributeError for non-ring events; the docs now match.
    if self.type != EventType.TABLET_PAD_RING:
        raise AttributeError(_wrong_prop.format(self.type))
    return self._libinput.libinput_event_tablet_pad_get_ring_number(self._handle)
def lookup_module_ident(id, version):
    """Return the ``module_ident`` for the given ``id`` &
    major and minor version as a tuple.
    """
    query = ("SELECT module_ident FROM modules "
             "WHERE uuid = %s "
             "AND CONCAT_WS('.', major_version, minor_version) = %s")
    with db_connect() as db_conn:
        with db_conn.cursor() as cursor:
            cursor.execute(query, (id, version))
            try:
                # fetchone() returns None when nothing matched, which makes
                # the subscript raise TypeError.
                return cursor.fetchone()[0]
            except (IndexError, TypeError):
                ident_hash = join_ident_hash(id, version)
                raise RuntimeError(
                    "Content at {} does not exist.".format(ident_hash))
def on_mouse_press(self, x, y, buttons, modifiers):
    """Set the start point of the drag."""
    ball = self.view['ball']
    # Default to rotation; specific button/modifier combos override it below.
    ball.set_state(Trackball.STATE_ROTATE)
    if buttons == pyglet.window.mouse.LEFT:
        has_ctrl = modifiers & pyglet.window.key.MOD_CTRL
        has_shift = modifiers & pyglet.window.key.MOD_SHIFT
        if has_ctrl and has_shift:
            ball.set_state(Trackball.STATE_ZOOM)
        elif has_shift:
            ball.set_state(Trackball.STATE_ROLL)
        elif has_ctrl:
            ball.set_state(Trackball.STATE_PAN)
    elif buttons == pyglet.window.mouse.MIDDLE:
        ball.set_state(Trackball.STATE_PAN)
    elif buttons == pyglet.window.mouse.RIGHT:
        ball.set_state(Trackball.STATE_ZOOM)
    # Record the press position and sync the camera with the trackball pose.
    ball.down(np.array([x, y]))
    self.scene.camera.transform = ball.pose
def verify_processing_options(opt, parser):
    """Parses the processing scheme options and verifies that they are
    reasonable.

    Parameters
    ----------
    opt : object
        Result of parsing the CLI with OptionParser, or any object with the
        required attributes.
    parser : object
        OptionParser instance.
    """
    scheme_types = scheme_prefix.values()
    scheme = opt.processing_scheme.split(':')[0]
    if scheme not in scheme_types:
        # BUG FIX: the original passed the bare format string
        # "(%s) is not a valid scheme type." to parser.error() without ever
        # interpolating the offending scheme name.
        parser.error("(%s) is not a valid scheme type." % scheme)
def train(self, data, label, batch_size):
    """Description: training for LipNet"""
    # pylint: disable=no-member
    total_loss = 0
    total_count = 0
    with autograd.record():
        # One loss tensor per (data, label) pair in the batch.
        batch_losses = [self.loss_fn(self.net(X), Y)
                        for X, Y in zip(data, label)]
        for batch_loss in batch_losses:
            total_loss += mx.nd.array(batch_loss).sum().asscalar()
            total_count += len(batch_loss)
            batch_loss.backward()
    self.trainer.step(batch_size)
    return total_loss, total_count
def shift_hue(image, hue):
    """Shifts the hue of an image in HSV format.

    :param image: PIL Image to perform operation on
    :param hue: value between 0 and 2.0 (1.0 means no change)
    """
    # Map [0, 2.0] onto a signed shift centered at zero.
    hue = (hue - 1.0) * 180
    img = image.copy().convert("HSV")
    pixels = img.load()
    for i in range(img.width):
        for j in range(img.height):
            h, s, v = pixels[i, j]
            # BUG FIX: wrap the shifted hue with modular arithmetic over the
            # 8-bit hue channel.  The original used abs() plus a single
            # "- 255", which mirrors negative hues instead of wrapping them
            # around the colour circle.
            h = int(h + hue) % 256
            pixels[i, j] = (h, s, v)
    return img.convert("RGBA")
def upload_files(self, container, src_dst_map, content_type=None):
    """Upload multiple files.

    :param container: Target container for the uploaded files.
    :param src_dst_map: Mapping of source path -> destination name; a falsy
        destination name falls back to the source file's basename.
    :param content_type: MIME type applied to every file; defaults to
        ``application/octet-stream``.
    """
    if not content_type:
        # BUG FIX: the default MIME type was misspelled
        # "application/octet.stream" (dot instead of dash).
        content_type = "application/octet-stream"
    url = self.make_url(container, None, None)
    headers = self._base_headers
    multi_files = []
    # BUG FIX: rsp was unbound if the request never completed; initialize it
    # so _handle_response() does not raise NameError in that case.
    rsp = None
    try:
        for src_path in src_dst_map:
            dst_name = src_dst_map[src_path] or os.path.basename(src_path)
            multi_files.append(
                ('files', (dst_name, open(src_path, 'rb'), content_type)))
        rsp = requests.post(url, headers=headers, files=multi_files,
                            timeout=self._timeout)
    except requests.exceptions.ConnectionError as e:
        RestHttp._raise_conn_error(e)
    finally:
        # Close every file handle that was successfully opened.
        for _, info in multi_files:
            _, fileobj, _ = info
            fileobj.close()
    return self._handle_response(rsp)
def dragdrop(self, chviewer, uris):
    """Called when a drop operation is performed on a channel viewer.

    We are called back with a URL and we attempt to (down)load it if it
    names a file.
    """
    # Resolve which channel the viewer belongs to, then hand the URIs off.
    channel = self.get_channel_name(chviewer)
    self.open_uris(uris, chname=channel)
    return True
def _set_show_portindex_interface_info(self, v, load=False):
    """Setter method for show_portindex_interface_info, mapped from YANG variable /brocade_fabric_service_rpc/show_portindex_interface_info (rpc)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_show_portindex_interface_info is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_show_portindex_interface_info() directly.

    YANG Description: Provides the details of 10G Ethernet and fibrechannel
    over ethernet ports. It consists of port index of the
    RBridge, port type (Te or FCOE) and port interface.
    Port interface is in the format
    rbridge-id/slot/port for Te ports and
    vlan-id/rbridge-id/port for FCOE ports.
    """
    # If the incoming value advertises a preferred YANG type, coerce it first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the value in a YANGDynClass carrying the full YANG metadata
        # (names, namespace, extensions) for this rpc node.
        t = YANGDynClass(v, base=show_portindex_interface_info.show_portindex_interface_info, is_leaf=True, yang_name="show-portindex-interface-info", rest_name="show-portindex-interface-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'show_portindex_interface_all'}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='rpc', is_config=True)
    except (TypeError, ValueError):
        # Re-raise as a structured error describing the expected type.
        raise ValueError({'error-string': """show_portindex_interface_info must be of a type compatible with rpc""", 'defined-type': "rpc", 'generated-type': """YANGDynClass(base=show_portindex_interface_info.show_portindex_interface_info, is_leaf=True, yang_name="show-portindex-interface-info", rest_name="show-portindex-interface-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'show_portindex_interface_all'}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='rpc', is_config=True)""", })
    self.__show_portindex_interface_info = t
    # Notify the parent object (if it supports it) that a child changed.
    if hasattr(self, '_set'):
        self._set()
def binary_construct(tokens):
    """Construct proper instructions for binary expressions from a
    sequence of tokens at the same precedence level.  For instance, if
    the tokens represent "1 + 2 + 3", this will return the instruction
    array "1 2 add_op 3 add_op".

    :param tokens: The sequence of tokens.

    :returns: An instance of ``Instructions`` containing the list of
              instructions.
    """
    # Seed the output with the left-most operand, then walk the remaining
    # (operator, operand) pairs from left to right.
    instructions = [tokens[0]]
    for op, rhs in zip(tokens[1::2], tokens[2::2]):
        instructions.append(rhs)
        # Let the operator apply constant folding over the last two entries.
        instructions[-2:] = op.fold(instructions[-2:])
    return instructions
def cli(env, billing_id, datacenter):
    """Adds a load balancer given the id returned from create-options."""
    manager = SoftLayer.LoadBalancerManager(env.client)
    # Creating a load balancer costs money: require explicit confirmation.
    prompt = ("This action will incur charges on your "
              "account. Continue?")
    if not formatting.confirm(prompt):
        raise exceptions.CLIAbort('Aborted.')
    manager.add_local_lb(billing_id, datacenter=datacenter)
    env.fout("Load balancer is being created!")
def _convert_ddb_list_to_list ( conversion_list ) :
"""Given a dynamodb list , it will return a python list without the dynamodb
datatypes
Args :
conversion _ list ( dict ) : a dynamodb list which includes the
datatypes
Returns :
list : Returns a sanitized list without the dynamodb datatypes""" | ret_list = [ ]
for v in conversion_list :
for v1 in v :
ret_list . append ( v [ v1 ] )
return ret_list |
def arc_consistency_3(domains, constraints):
    """Makes a CSP problem arc consistent.

    Ignores any constraint that is not binary.
    """
    arcs = list(all_arcs(constraints))
    pending = set(arcs)
    while pending:
        x, y = pending.pop()
        if not revise(domains, (x, y), constraints):
            continue
        # The domain of x shrank: an empty domain means no solution exists.
        if not domains[x]:
            return False
        # Every arc pointing at x must be re-examined.
        pending |= {(a, b) for a, b in arcs if b == x}
    return True
def _shapeletOutput(self, x, y, beta, shapelets, precalc=True):
    """returns the the numerical values of a set of shapelets at polar coordinates
    :param shapelets: set of shapelets [l=,r=,a_lr=]
    :type shapelets: array of size (n,3)
    :param coordPolar: set of coordinates in polar units
    :type coordPolar: array of size (n,2)
    :returns: array of same size with coords [r,phi]
    :raises: AttributeError, KeyError
    """
    # Number of evaluation points; a single point uses a scalar accumulator.
    n = len(np.atleast_1d(x))
    if n <= 1:
        values = 0.
    else:
        # NOTE(review): assumes x is 2-D here (len(x[0])) — confirm callers.
        values = np.zeros(len(x[0]))
    # ``n``/``k`` are re-used from here on as the shapelet order indices.
    n = 0
    k = 0
    i = 0
    num_n = len(shapelets)
    # Triangular iteration over all (n-k, k) order pairs, num_n*(num_n+1)/2
    # terms in total, accumulating each basis function's contribution.
    while i < num_n * (num_n + 1) / 2:
        values += self._function(x, y, shapelets[n - k][k], beta, n - k, k, precalc=precalc)
        k += 1
        if k == n + 1:
            # Move to the next diagonal of (n, k) pairs.
            n += 1
            k = 0
        i += 1
    return values
def handle_resource_not_found(resource):
    """Set resource state to ERRED and append/create "not found" error message."""
    resource.set_erred()
    resource.runtime_state = ''
    message = 'Does not exist at backend.'
    if message not in resource.error_message:
        # Either start a fresh error message or append to the existing one.
        if resource.error_message:
            resource.error_message += ' (%s)' % message
        else:
            resource.error_message = message
        resource.save()
    logger.warning('%s %s (PK: %s) does not exist at backend.' % (
        resource.__class__.__name__, resource, resource.pk))
def write_column(self, column, data, **keys):
    """Write data to a column in this HDU

    This HDU must be a table HDU.

    parameters
    ----------
    column: scalar string/integer
        The column in which to write.  Can be the name or number (0 offset)
    column: ndarray
        Numerical python array to write.  This should match the
        shape of the column.  You are probably better using
        fits.write_table() to be sure.
    firstrow: integer, optional
        At which row you should begin writing.  Be sure you know what you
        are doing!  For appending see the append() method.  Default 0.
    """
    firstrow = keys.get('firstrow', 0)
    # Resolve a name or 0-offset number to the column index.
    colnum = self._extract_colnum(column)
    # need it to be contiguous and native byte order.  For now, make a
    # copy.  but we may be able to avoid this with some care.
    if not data.flags['C_CONTIGUOUS']:
        # this always makes a copy
        data_send = numpy.ascontiguousarray(data)
        # this is a copy, we can make sure it is native
        # and modify in place if needed
        array_to_native(data_send, inplace=True)
    else:
        # we can avoid the copy with a try-finally block and
        # some logic
        data_send = array_to_native(data, inplace=False)
    if IS_PY3 and data_send.dtype.char == 'U':
        # for python3, we convert unicode to ascii
        # this will error if the character is not in ascii
        data_send = data_send.astype('S', copy=False)
    self._verify_column_data(colnum, data_send)
    # CFITSIO extensions/columns/rows are 1-offset, hence the +1 adjustments.
    self._FITS.write_column(self._ext + 1, colnum + 1, data_send,
                            firstrow=firstrow + 1,
                            write_bitcols=self.write_bitcols)
    # Drop the (possible) copy before refreshing the cached header info.
    del data_send
    self._update_info()
def from_sys_requirements(cls, system_requirements, _type='all'):
    """Returns SystemRequirementsDict encapsulating system requirements.

    It can extract only entrypoints with specific fields ('clusterSpec',
    'instanceType', etc), depending on the value of _type.
    """
    if _type not in ('all', 'clusterSpec', 'instanceType'):
        raise DXError("Expected '_type' to be either 'all', 'clusterSpec', or 'instanceType'")
    if _type == 'all':
        return cls(system_requirements)
    # Keep only the requested field for every entrypoint that defines it.
    extracted = {
        entrypoint: {_type: req[_type]}
        for entrypoint, req in system_requirements.items()
        if _type in req
    }
    return cls(extracted)
def pauli_sum(*elements: Pauli) -> Pauli:
    """Return the sum of elements of the Pauli algebra."""
    by_term = itemgetter(0)
    # Merge the (already ordered) element sequences, then combine the
    # coefficients of equal terms.
    merged = heapq.merge(*elements, key=by_term)
    collected = []
    for term, group in groupby(merged, key=by_term):
        total = sum(entry[1] for entry in group)
        # Terms whose coefficients cancel out are dropped entirely.
        if not isclose(total, 0.0):
            collected.append((term, total))
    return Pauli(tuple(collected))
def tune(runner, kernel_options, device_options, tuning_options):
    """Find the best performing kernel configuration in the parameter space

    :params runner: A runner from kernel_tuner.runners
    :type runner: kernel_tuner.runner

    :param kernel_options: A dictionary with all options for the kernel.
    :type kernel_options: dict

    :param device_options: A dictionary with all options for the device
        on which the kernel should be tuned.
    :type device_options: dict

    :param tuning_options: A dictionary with all options regarding the tuning
        process.
    :type tuning_options: dict

    :returns: A list of dictionaries for executed kernel configurations and their
        execution times. And a dictionary that contains a information
        about the hardware/software environment on which the tuning took place.
    :rtype: list(dict()), dict()
    """
    results = []   # populated in-place by _cost_func on every evaluation
    cache = {}     # memoizes configurations already benchmarked
    method = tuning_options.method
    # scale variables in x to make 'eps' relevant for multiple variables
    tuning_options["scaling"] = True
    bounds, x0, eps = get_bounds_x0_eps(tuning_options)
    kwargs = setup_method_arguments(method, bounds)
    options = setup_method_options(method, tuning_options)
    kwargs['options'] = options
    # Extra state threaded through scipy into _cost_func on each call.
    args = (kernel_options, tuning_options, runner, results, cache)
    minimizer_kwargs = dict(**kwargs)
    minimizer_kwargs["method"] = method
    minimizer_kwargs["args"] = args
    # Global optimization: basinhopping perturbs x by ~eps between local
    # minimizations.
    opt_result = scipy.optimize.basinhopping(_cost_func, x0, stepsize=eps, minimizer_kwargs=minimizer_kwargs, disp=tuning_options.verbose)
    if tuning_options.verbose:
        print(opt_result.message)
    # ``results`` was filled by _cost_func; the scipy result object itself is
    # only consulted for its message above.
    return results, runner.dev.get_environment()
def set_value(self, option, value, index=None):
    """Sets the value on the given option.

    :param option: The name of the option as it appears in the config file
    :param value: The value that is being applied. If this section is indexed
        then the value must be a list (to be applied directly) or you must
        supply the index parameter, which will cause the value to be inserted
        into an existing list.
    :param index: If the attribute is indexed, we will use this index to
        insert the value you have supplied.
    :return: an instance of itself so that you can chain setting values
        together.
    """
    # An indexed section with no index requires a whole list to be supplied.
    whole_list_required = self.is_indexed and index is None
    if whole_list_required and not isinstance(value, list):
        raise TypeError("Value should be a list when not giving an index in an indexed header")
    self.values[option].set_value(value=value, index=index)
    return self
def setaty(self, content):
    """Grab the (aty) soap-enc:arrayType and attach it to the
    content for proper array processing later in end().

    @param content: The current content being unmarshalled.
    @type content: L{Content}
    @return: self
    @rtype: L{Encoded}
    """
    ns = (None, 'http://schemas.xmlsoap.org/soap/encoding/')
    aty = content.node.get('arrayType', ns)
    if aty is not None:
        content.aty = aty
        parts = aty.split('[')
        if len(parts) == 2:
            # One-dimensional array: resolve the referenced element type.
            self.applyaty(content, parts[0])
        # Two-dimensional arrays are intentionally left untouched.
    return self
def map(requests, prefetch=True, size=None):
    """Concurrently converts a list of Requests to Responses.

    :param requests: a collection of Request objects.
    :param prefetch: If False, the content will not be downloaded immediately.
    :param size: Specifies the number of requests to make at a time. If None, no throttling occurs.
    """
    if size:
        # Throttle the requests through a bounded gevent pool.
        pool = Pool(size)
        pool.map(send, requests)
        pool.join()
    else:
        # No throttling: fire everything at once and wait for all of them.
        gevent.joinall([gevent.spawn(send, r) for r in requests])
    if prefetch:
        # Touch .content so the body is downloaded before returning.
        for r in requests:
            r.response.content
    return [r.response for r in requests]
def check_key(data_object, key, cardinal=False):
    """Update the value of an index key by matching values or getting positionals."""
    # Integer-like types accepted as scalar keys.
    itype = (int, np.int32, np.int64)
    if not isinstance(key, itype + (slice, tuple, list, np.ndarray)):
        raise KeyError("Unknown key type {} for key {}".format(type(key), key))
    keys = data_object.index.values
    if cardinal and data_object._cardinal is not None:
        # Cardinal mode: match against the unique values of the cardinal
        # column instead of the raw index.
        keys = data_object[data_object._cardinal[0]].unique()
    elif isinstance(key, itype) and key in keys:
        # NOTE(review): indexing .values with a scalar yields a scalar, so
        # sorted(...) over it will raise TypeError — confirm intent.
        key = list(sorted(data_object.index.values[key]))
    elif isinstance(key, itype) and key < 0:
        # Negative scalar: resolve positionally from the end.
        key = list(sorted(data_object.index.values[key]))
    elif isinstance(key, itype):
        key = [key]
    elif isinstance(key, slice):
        key = list(sorted(data_object.index.values[key]))
    elif isinstance(key, (tuple, list, pd.Index)) and not np.all(k in keys for k in key):
        # NOTE(review): np.all() over a generator returns the truthiness of
        # the generator object itself (always True), so this branch can never
        # be taken; the builtin all() was probably intended — confirm.
        key = list(sorted(data_object.index.values[key]))
    return key
def load_fast_format(filename):
    """Load a reach instance in fast format.

    As described above, the fast format stores the words and vectors of the
    Reach instance separately, and is drastically faster than loading from
    .txt files.

    Parameters
    ----------
    filename : str
        The filename prefix from which to load. Note that this is not a
        real filepath as such, but a shared prefix for both files.
        In order for this to work, both {filename}_words.json and
        {filename}_vectors.npy should be present.
    """
    # _load_fast returns the pieces in (words, unk, name, vectors) order.
    loaded = Reach._load_fast(filename)
    loaded_words, unk_index, name, loaded_vectors = loaded
    return Reach(loaded_vectors, loaded_words, unk_index=unk_index, name=name)
def ensure_keys(walk, *keys):
    """Use walk to try to retrieve all keys."""
    # Attempt every key (even after a failure) so each one gets a chance
    # to be retrieved; report whether all of them succeeded.
    failures = 0
    for key in keys:
        try:
            walk(key)
        except WalkKeyNotRetrieved:
            failures += 1
    return failures == 0
def is_team_member(name, team_name, profile="github"):
    '''Returns True if the github user is in the team with team_name, or False
    otherwise.

    name
        The name of the user whose membership to check.

    team_name
        The name of the team to check membership in.

    profile
        The name of the profile configuration to use. Defaults to ``github``.

    CLI Example:

    .. code-block:: bash

        salt myminion github.is_team_member 'user_name' 'team_name'

    .. versionadded:: 2016.11.0
    '''
    # Membership is checked case-insensitively against the team roster.
    members = list_team_members(team_name, profile=profile)
    return name.lower() in members
def ignore_broken_pipe():
    """If a shellish program has redirected stdio it is subject to erroneous
    "ignored" exceptions during the interpretor shutdown.  This essentially
    beats the interpretor to the punch by closing them early and ignoring any
    broken pipe exceptions.
    """
    for stream in (sys.stdin, sys.stdout, sys.stderr):
        try:
            stream.close()
        except BrokenPipeError:
            # The other end already went away; nothing further to do.
            pass
def update_current_state(self, wid, key, value):
    '''Update current state with a (possibly new) value associated with key

    If the key does not represent an existing entry, then ignore it
    '''
    widget_state = self.current_state().get(wid, {})
    # Unknown keys are ignored on purpose.
    if key not in widget_state:
        return
    if widget_state[key] != value:
        widget_state[key] = value
        # Remember that the hydrated state diverged from the stored one.
        setattr(self, '_current_state_hydrated_changed', True)
def send_to_delivery_stream(events, stream_name):
    """Sends a list of events to a Firehose delivery stream."""
    if not events:
        logger.info("No events provided: nothing delivered to Firehose")
        return
    records = []
    for event in events:
        # csv events already have a newline
        payload = event if isinstance(event, str) else json.dumps(event) + "\n"
        records.append({"Data": payload})
    firehose = boto3.client("firehose")
    logger.info("Delivering %s records to Firehose stream '%s'",
                len(records), stream_name)
    return firehose.put_record_batch(DeliveryStreamName=stream_name,
                                     Records=records)
def peer_status():
    '''Return peer status information

    The return value is a dictionary with peer UUIDs as keys and dicts of peer
    information as values. Hostnames are listed in one list. GlusterFS separates
    one of the hostnames but the only reason for this seems to be which hostname
    happens to be used first in peering.

    CLI Example:

    .. code-block:: bash

        salt '*' glusterfs.peer_status

    GLUSTER direct CLI example (to show what salt is sending to gluster):

        $ gluster peer status
    '''
    root = _gluster_xml('peer status')
    if not _gluster_ok(root):
        return None
    result = {}
    for peer in _iter(root, 'peer'):
        uuid = peer.find('uuid').text
        info = {'hostnames': []}
        result[uuid] = info
        for item in peer:
            tag = item.tag
            if tag == 'hostname':
                # Older gluster puts the primary hostname in its own element.
                info['hostnames'].append(item.text)
            elif tag == 'hostnames':
                # Newer gluster nests all hostnames; de-duplicate on merge.
                for hostname in item:
                    if hostname.text not in info['hostnames']:
                        info['hostnames'].append(hostname.text)
            elif tag != 'uuid':
                # Everything else (state, connected, ...) is copied verbatim.
                info[tag] = item.text
    return result
def exit(self) -> None:
    """Raise SystemExit with correct status code and output logs."""
    total = sum(len(entries) for entries in self.logs.values())
    if self.json:
        self.logs['total'] = total
        print(json.dumps(self.logs, indent=self.indent))
    else:
        for name, entries in self.logs.items():
            # Respect per-checker quiet settings and skip empty logs.
            if not entries or self.parser[name].as_bool("quiet"):
                continue
            print("[[{0}]]".format(name))
            getattr(snekchek.format, name + "_format")(entries)
            print("\n")
        print("-" * 30)
        print("Total:", total)
    sys.exit(self.status_code)
def do_rmdep(self, args):
    """Removes dependent variables currently set for plotting/tabulating etc."""
    # All per-variable settings that may reference a dependent variable.
    # IMPROVEMENT: the original repeated six identical ``if arg in ...:
    # del ...`` blocks; this table-driven loop is equivalent and DRY.
    keyed_settings = ("plottypes", "twinplots", "colors",
                      "labels", "markers", "lines")
    for arg in args.split():
        if arg in self.curargs["dependents"]:
            self.curargs["dependents"].remove(arg)
        for setting in keyed_settings:
            # pop(..., None) removes the entry only if present.
            self.curargs[setting].pop(arg, None)
def from_credentials_db(client_secrets, storage, api_version="v3", readonly=False, http_client=None, ga_hook=None):
    """Create a client for a web or installed application.

    Create a client with a credentials stored in stagecraft db.

    Args:
        client_secrets: dict, client secrets (downloadable from
            Google API Console)
        storage: stagecraft.apps.collectors.libs.ga.CredentialStorage,
            a Storage implementation to store credentials.
        readonly: bool, default False, if True only readonly access is
            requested from GA.
        http_client: httplib2.Http, Override the default http client used.
        ga_hook: function, a hook that is called every time a query is made
            against GA.

    NOTE(review): client_secrets and readonly are accepted but never used in
    this body — confirm whether they are consumed elsewhere.
    """
    credentials = storage.get()
    service = _build(credentials, api_version, http_client)
    return Client(service, ga_hook)
def send_notice(self, room_id, text_content, timestamp=None):
    """Perform PUT /rooms/$room_id/send/m.room.message with m.notice msgtype

    Args:
        room_id (str): The room ID to send the event in.
        text_content (str): The m.notice body to send.
        timestamp (int): Set origin_server_ts (For application services only)
    """
    content = {
        "msgtype": "m.notice",
        "body": text_content,
    }
    return self.send_message_event(room_id, "m.room.message", content,
                                   timestamp=timestamp)
def read(self, size=None):
    """Read a length of bytes.  Return empty on EOF.  If 'size' is omitted,
    return whole file.
    """
    # A bounded read is delegated straight to the underlying stream.
    if size is not None:
        return self.__sf.read(size)
    # Otherwise drain the stream block by block into a bytearray.
    # NOTE(review): the whole-file path returns a bytearray while the sized
    # path returns whatever the underlying read() yields — confirm callers
    # tolerate both.
    block_size = self.__class__.__block_size
    buf = bytearray()
    received_bytes = 0
    while True:
        chunk = self.__sf.read(block_size)
        buf.extend(chunk)
        received_bytes += len(chunk)
        # A short read signals the end of the stream.
        if len(chunk) < block_size:
            self.__log.debug("End of file.")
            break
    self.__log.debug("Read (%d) bytes for total-file." % (received_bytes))
    return buf
def process_file(filename):
    """Read a file from disk and parse it into a structured dict."""
    try:
        with codecs.open(filename, encoding='utf-8', mode='r') as f:
            file_contents = f.read()
    except IOError:
        log.info('Unable to read file: %s', filename)
        return None
    data = json.loads(file_contents)
    # Files without a page name cannot be indexed at all.
    if 'current_page_name' not in data:
        log.info('Unable to index file due to no name %s', filename)
        return None
    path = data['current_page_name']
    body_content = ''
    if data.get('body'):
        # Strip the paragraph-anchor pilcrows Sphinx inserts.
        body_content = PyQuery(data['body']).text().replace(u'¶', '')
    else:
        log.info('Unable to index content for: %s', filename)
    title = ''
    if 'title' in data:
        title = data['title']
        if title.startswith('<'):
            # The title can itself be an HTML fragment; extract its text.
            title = PyQuery(data['title']).text()
    else:
        log.info('Unable to index title for: %s', filename)
    return {'headers': process_headers(data, filename),
            'content': body_content, 'path': path, 'title': title}
def register(self, *model_list, **options):
    """Registers the given model(s) with the given databrowse site.

    The model(s) should be Model classes, not instances.

    If a databrowse class isn't given, it will use DefaultModelDatabrowse
    (the default databrowse options).

    If a model is already registered, this will raise AlreadyRegistered.
    """
    databrowse_class = options.pop('databrowse_class', DefaultModelDatabrowse)
    registry = self.registry
    for model in model_list:
        # Registering the same model twice is a programming error.
        if model in registry:
            raise AlreadyRegistered(
                'The model %s is already registered' % model.__name__)
        registry[model] = databrowse_class
def get(self, fallback=not_set):
    """Return the config value.

    Resolution order: environment variable, explicitly set value, declared
    default, then the ``fallback`` argument.  Raises RequiredValueMissing
    when the item is required and nothing else is available.

    See Also:
        :meth:`.set` and :attr:`.value`
    """
    # Environment variables always take precedence.
    from_env = self._get_envvar_value()
    if from_env is not not_set:
        return from_env
    if self.has_value:
        # An explicitly set value beats the declared default.
        if self._value is not not_set:
            return self._value
        # Deep-copy so callers cannot mutate the shared default object.
        return copy.deepcopy(self.default)
    if fallback is not not_set:
        return fallback
    if self.required:
        raise RequiredValueMissing(name=self.name, item=self)
    return fallback
def convert_dict_to_compatible_tensor(values, targets):
    """Converts dict `values` in tensors that are compatible with `targets`.

    Args:
      values: A dict to objects to convert with same keys as `targets`.
      targets: A dict returned by `parse_tensor_info_map`.

    Returns:
      A map with the same keys as `values` but values converted into
      Tensor/SparseTensors that can be fed into `protomap`.

    Raises:
      TypeError: If it fails to convert.
    """
    # Iterate in sorted key order (deterministic conversion order).
    return {
        key: _convert_to_compatible_tensor(
            value, targets[key], error_prefix="Can't convert %r" % key)
        for key, value in sorted(values.items())
    }
def logout(self):
    """Log out by quitting the browser and stopping the virtual display.

    :raises exceptions.BrowserException: if the browser was never started.
    :returns: True on success.
    """
    try:
        self.browser.quit()
    except Exception:
        # Re-raise as the domain-specific error. (A dead `return False`
        # that could never execute after this raise has been removed.)
        raise exceptions.BrowserException(self.brow_name, "not started")
    self.vbro.stop()
    logger.info("logged out")
    return True
def absent(name, auth=None, **kwargs):
    '''Ensure a security group does not exist

    name
        Name of the security group
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}

    kwargs = __utils__['args.clean_kwargs'](**kwargs)
    __salt__['neutronng.setup_clouds'](auth)

    # Resolve the project id so the group lookup is scoped correctly.
    kwargs['project_id'] = __salt__['keystoneng.project_get'](
        name=kwargs['project_name'])
    secgroup = __salt__['neutronng.security_group_get'](
        name=name, filters={'project_id': kwargs['project_id']})

    # Already absent: nothing to do, report success unchanged.
    if not secgroup:
        return ret

    if __opts__['test'] is True:
        ret['result'] = None
        ret['changes'] = {'id': secgroup.id}
        ret['comment'] = 'Security group will be deleted.'
        return ret

    __salt__['neutronng.security_group_delete'](name=secgroup)
    ret['changes']['id'] = name
    ret['comment'] = 'Deleted security group'
    return ret
def list_functions(*args, **kwargs):  # pylint: disable=unused-argument
    '''
    List the functions for all modules. Optionally, specify a module or modules
    from which to list.

    CLI Example:

    .. code-block:: bash

        salt '*' sys.list_functions
        salt '*' sys.list_functions sys
        salt '*' sys.list_functions sys user

    Function names can be specified as globs.

    .. versionadded:: 2015.5.0

    .. code-block:: bash

        salt '*' sys.list_functions 'sys.list_*'

    .. versionadded:: ?

    .. code-block:: bash

        salt '*' sys.list_functions 'module.specific_function'
    '''
    # **kwargs swallows garbage arguments tacked on to the end so they
    # do not cause a traceback.
    if not args:
        # No filter given: return every loaded function.
        return sorted(__salt__)

    matches = set()
    for spec in args:
        if '*' in spec or '.' in spec:
            # Glob or fully-qualified name: match against the whole key.
            matches.update(fnmatch.filter(__salt__, spec))
        else:
            # Bare module name: "sys" must match sys.* without also
            # matching sysctl.*.
            prefix = spec + '.'
            matches.update(fn for fn in __salt__ if fn.startswith(prefix))
    return sorted(matches)
def readTFAM(fileName):
    """Reads the TFAM file.

    :param fileName: the name of the ``tfam`` file.
    :type fileName: str

    :returns: a representation the ``tfam`` file (:py:class:`numpy.array`).
    """
    # One sample per line: strip the line ending, split the tab-separated
    # fields into a tuple, and stack everything into a numpy array.
    with open(fileName, 'r') as inputFile:
        rows = [tuple(line.rstrip("\r\n").split("\t"))
                for line in inputFile.readlines()]
    return np.array(rows)
def output_story_prefixes(self):
    """Writes the set of prefixes to a file; this is useful for pretty
    printing in results.latex_output."""
    if not self.test_story:
        # Fixed: the two adjacent literals previously concatenated into
        # "...a filecalled..." (missing space between them).
        raise NotImplementedError(
            "I want to write the prefixes to a file "
            "called <test_story>_prefixes.txt, but there's no test_story.")
    fn = os.path.join(TGT_DIR, "%s_prefixes.txt" % self.test_story)
    with open(fn, "w") as f:
        for utter_id in self.test_prefixes:
            # Only the second path component identifies the utterance.
            print(utter_id.split("/")[1], file=f)
def as_dictionary(self):
    """Return the service agreement template as a dictionary.

    :return: dict
    """
    events = [e.as_dictionary() for e in self.agreement_events]
    conditions = [cond.as_dictionary() for cond in self.conditions]
    template = {
        'contractName': self.contract_name,
        'events': events,
        'fulfillmentOrder': self.fulfillment_order,
        'conditionDependency': self.condition_dependency,
        'conditions': conditions,
    }
    # 'type': self.DOCUMENT_TYPE was deliberately left out upstream.
    return {
        'name': self.name,
        'creator': self.creator,
        'serviceAgreementTemplate': template,
    }
def _processImpurityMatrix(self):
    """Process the impurity matrix so that it can be used to correct
    observed reporter intensities."""
    # Normalize, pad out to the full channel range, then transpose so the
    # matrix is oriented for applying the correction.
    normalized = _normalizeImpurityMatrix(self.impurityMatrix)
    padded = _padImpurityMatrix(
        normalized, self.matrixPreChannels, self.matrixPostChannels)
    return _transposeMatrix(padded)
def check_ellipsis(text):
    """Use an ellipsis instead of three dots."""
    err = "typography.symbols.ellipsis"
    msg = u"'...' is an approximation, use the ellipsis symbol '…'."
    # Raw string: "\." in a plain literal is an invalid escape sequence
    # (DeprecationWarning today, a SyntaxError in future Python). The
    # runtime value of the pattern is unchanged.
    regex = r"\.\.\."
    return existence_check(text, [regex], err, msg, max_errors=3,
                           require_padding=False, offset=0)
def get_chat_server(self, channel):
    """Get an appropriate chat server for the given channel

    Usually the server is irc.twitch.tv. But because of the delicate
    twitch chat, they use a lot of servers. Big events are on special
    event servers. This method tries to find a good one.

    :param channel: the channel with the chat
    :type channel: :class:`models.Channel`
    :returns: the server address and port
    :rtype: (:class:`str`, :class:`int`)
    :raises: None
    """
    r = self.oldapi_request(
        'GET', 'channels/%s/chat_properties' % channel.name)
    # Renamed from `json`: the old local shadowed the json module.
    props = r.json()
    servers = props['chat_servers']
    try:
        r = self.get(TWITCH_STATUSURL)
    except requests.HTTPError:
        # Status service unavailable; fall back to the first server.
        log.debug('Error getting chat server status. Using random one.')
        address = servers[0]
    else:
        stats = [client.ChatServerStatus(**d) for d in r.json()]
        address = self._find_best_chat_server(servers, stats)
    server, port = address.split(':')
    return server, int(port)
def layerFromSource(source):
    '''Returns the layer from the current project with the passed source

    Raises WrongLayerSourceException if no layer with that source is found
    '''
    # Scan every registered layer for a matching source string.
    for candidate in _layerreg.mapLayers().values():
        if candidate.source() == source:
            return candidate
    raise WrongLayerSourceException()
def update_one(self, id_, data, using_name=True):
    """Update one record. Any fields you don't specify will remain unchanged.

    Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#update

    :param id_: record id_
    :param data: the new data fields and values
    :param using_name: if you are using field name in data,
        please set using_name=True (it's the default), otherwise, False
    """
    # Normalize the values first, then optionally map field names to keys.
    payload = self.convert_values(data)
    if using_name:
        payload = self.convert_keys(payload)
    url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
        self.key, id_)
    return self.put(url, payload)
def get_dict_diff_str(d1, d2, title):
    """Returns same as `get_dict_diff`, but as a readable string."""
    added, removed, changed = get_dict_diff(d1, d2)
    # Each non-empty section becomes one line under the title.
    sections = (
        ("Added attributes: %s", added),
        ("Removed attributes: %s", removed),
        ("Changed attributes: %s", changed),
    )
    lines = [title]
    for fmt, keys in sections:
        if keys:
            lines.append(fmt % ['.'.join(x) for x in keys])
    return '\n'.join(lines)
def form_node_label_prediction_matrix(y_pred, y_test):
    """Given the discriminator distances, this function forms the node-label prediction matrix.

    It is assumed that the number of true labels is known.

    Inputs:  - y_pred: A NumPy array that contains the distance from the discriminator for each label for each user.
             - y_test: The node-label ground truth for the test set in a SciPy sparse CSR matrix format.

    Outputs: - y_pred: The node-label prediction for the test set in a SciPy sparse CSR matrix format.
    """
    number_of_test_nodes = y_pred.shape[0]
    # We calculate the number of true labels for each node.
    # NOTE(review): sum(axis=1) on a sparse matrix yields an (n, 1)
    # np.matrix; squeeze turns it into a (1, n) matrix, which is why the
    # [0, n] indexing below works — confirm before switching to ndarray.
    true_number_of_labels = np.squeeze(y_test.sum(axis=1))
    # We sort the prediction array for each node (ascending), so the
    # highest-scoring label indices sit at the end of each row of `index`.
    index = np.argsort(y_pred, axis=1)
    # Preallocate COO coordinates: one entry per true label in y_test.
    row = np.empty(y_test.getnnz(), dtype=np.int64)
    col = np.empty(y_test.getnnz(), dtype=np.int64)
    start = 0
    for n in np.arange(number_of_test_nodes):
        end = start + true_number_of_labels[0, n]
        row[start:end] = n
        # Take the top-k column indices (k = this node's true label count)
        # in descending score order via a reversed slice.
        col[start:end] = index[n, -1:-true_number_of_labels[0, n] - 1:-1]
        start = end
    data = np.ones_like(row, dtype=np.int8)
    # Binary prediction matrix with exactly k ones per row.
    y_pred = sparse.coo_matrix((data, (row, col)), shape=y_test.shape)
    return y_pred
def _clean_java_out ( version_str ) :
"""Remove extra environmental information reported in java when querying for versions .
Java will report information like _ JAVA _ OPTIONS environmental variables in the output .""" | out = [ ]
for line in version_str . decode ( ) . split ( "\n" ) :
if line . startswith ( "Picked up" ) :
pass
if line . find ( "setlocale" ) > 0 :
pass
else :
out . append ( line )
return "\n" . join ( out ) |
def forward_list(self):
    '''adb forward --list'''
    version = self.version()
    # `forward --list` needs adb >= 1.0.31. Compare the version parts as
    # a tuple: the old element-wise `and` test ordered versions wrongly
    # (e.g. it accepted 0.x.y builds that are older than 1.0.31).
    if tuple(int(part) for part in version[1:4]) < (1, 0, 31):
        raise EnvironmentError("Low adb version.")
    raw = self.raw_cmd("forward", "--list").communicate()[0]
    lines = raw.decode("utf-8").strip().splitlines()
    return [line.strip().split() for line in lines]
def parse_source_file(filename):
    """Parse source file into AST node.

    Parameters
    ----------
    filename : str
        File path

    Returns
    -------
    node : AST node
    content : utf-8 encoded string
    """
    # Read as raw bytes: under Python 2.7, ast.parse() on a unicode
    # string that carries an encoding declaration raises "SyntaxError:
    # encoding declaration in Unicode string" — and source headers often
    # declare their encoding. Minimal failing example:
    # ast.parse(u'# -*- coding: utf-8 -*-')
    with open(filename, 'rb') as fid:
        raw = fid.read()
    # Normalize Windows line endings to UNIX for uniformity.
    raw = raw.replace(b'\r\n', b'\n')
    text = raw.decode('utf-8')
    try:
        return ast.parse(raw), text
    except SyntaxError:
        # Unparsable source: still return the decoded content.
        return None, text
def to_edgelist(self):
    """Export the current transforms as a list of edge tuples, with
    each tuple having the format:
    (node_a, node_b, {metadata})

    Returns
    -------
    edgelist : (n,) list of tuples
    """
    # save cleaned edges
    export = []
    # loop through (node, node, edge attributes)
    for edge in nx.to_edgelist(self.transforms):
        a, b, c = edge
        # geometry is a node property but save it to the
        # edge so we don't need two dictionaries
        # NOTE(review): `.node` is the networkx 1.x/2.x node-attribute
        # mapping; it was removed in networkx >= 2.4 (use `.nodes`) —
        # confirm the pinned networkx version before upgrading.
        if 'geometry' in self.transforms.node[b]:
            c['geometry'] = self.transforms.node[b]['geometry']
        # save the matrix as a float list (JSON-serializable form)
        c['matrix'] = np.asanyarray(c['matrix'], dtype=np.float64).tolist()
        export.append((a, b, c))
    return export
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.