signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def find_multiple(line, lookup):
    """Search *line* with the regexp *lookup* and collect all captured groups.

    :param line: text line to search
    :param lookup: regular expression with capturing groups
    :return: list of captured group values, or False when there is no
        match or the pattern defines no capturing groups
    """
    match = re.search(lookup, line)
    if not match:
        return False
    captured = list(match.groups())
    return captured if captured else False
def receive_promise(self, msg):
    """Handle a Promise message from an acceptor.

    Returns an Accept message exactly when a quorum of Promise messages is
    first reached (and a value is available); returns None otherwise.

    :param msg: Promise message carrying ``proposal_id``, ``from_uid``,
        ``last_accepted_id`` and ``last_accepted_value``.
    """
    # Record the proposal id (presumably so future proposals are numbered
    # above anything seen so far -- behaviour lives in observe_proposal).
    self.observe_proposal(msg.proposal_id)
    # Only count promises for our own current proposal, from peers we have
    # not already heard from, and only while we are not yet the leader.
    if not self.leader and msg.proposal_id == self.proposal_id and msg.from_uid not in self.promises_received:
        self.promises_received.add(msg.from_uid)
        # Paxos rule: adopt the value of the highest-numbered previously
        # accepted proposal reported by any promising acceptor.
        if self.highest_accepted_id is None or msg.last_accepted_id > self.highest_accepted_id:
            self.highest_accepted_id = msg.last_accepted_id
            if msg.last_accepted_value is not None:
                self.proposed_value = msg.last_accepted_value
        # Equality (not >=) means this fires only once, on the promise that
        # completes the quorum; later promises fall through to None.
        if len(self.promises_received) == self.quorum_size:
            self.leader = True
            if self.proposed_value is not None:
                self.current_accept_msg = Accept(self.network_uid, self.proposal_id, self.proposed_value)
                return self.current_accept_msg
def exec_cmd(self, command, **kwargs):
    """Validate and execute *command*.

    Wrapper method that can be changed in the inheriting classes.

    :param command: command name; checked via ``_is_allowed_command``.
    :param kwargs: command parameters; checked via ``_check_command_parameters``.
    :return: whatever the concrete ``_exec_cmd`` implementation returns.
    """
    # Validate first (presumably these raise on failure -- confirm in the
    # concrete subclass), then delegate to the real implementation.
    self._is_allowed_command(command)
    self._check_command_parameters(**kwargs)
    return self._exec_cmd(command, **kwargs)
def coordinate(self, panes=None, index=0):
    """Update pane coordinate tuples based on their height and width relative
    to other panes within the dimensions of the current window.

    We account for panes with a height of 1 where the bottom coordinates are
    the same as the top, for floating panes, and for self-coordinating panes
    adjacent to panes set to EXPAND.

    Coordinates are of the form:
        ((top-left-from-top, top-left-from-left),
         (top-right-from-top, top-right-from-left)),
        ((bottom-left-from-top, bottom-left-from-left),
         (bottom-right-from-top, bottom-right-from-left))

    We can then use these to determine things such as whether corners are
    inverted and how many characters may be drawn.

    :param panes: unused by the body; kept for interface compatibility.
        Bug fix: previously defaulted to a shared mutable list (``[]``).
    :param index: unused by the body; kept for interface compatibility.
    """
    if panes is None:
        panes = []
    y = 0  # running offset from the top of the window
    for element in self.panes:
        x = 0  # running offset from the left of the window
        if isinstance(element, list):
            # A horizontal row of panes: x advances per pane, y advances
            # once for the whole row using the last visible pane's height.
            current_height = 0
            for pane in element:
                if pane.hidden:
                    continue
                current_width = pane.width
                current_height = pane.height
                upper = ((y, x), (y, x + current_width))
                # Height-1 panes share their bottom edge with their top.
                lower = ((y + (current_height if current_height > 1 else 0), x),
                         (y + (current_height if current_height > 1 else 0), x + current_width))
                pane.coords = [upper, lower]
                x += current_width
            y += (current_height + 1 if current_height > 1 else 1)
        else:
            if element.hidden:
                continue
            current_width = element.width
            current_height = element.height
            upper = ((y, x), (y, x + current_width))
            lower = ((y + (current_height if current_height > 1 else 0), x),
                     (y + (current_height if current_height > 1 else 0), x + current_width))
            element.coords = [upper, lower]
            y += (current_height + 1 if current_height > 1 else 1)
    if self.debug:
        coordinates = "Coordinates: " + str([p.coords for p in self])
        if len(coordinates) > self.width:
            # Truncate to fit the window, leaving room for the ellipsis.
            coordinates = coordinates[:self.width - 3]
            coordinates += '...'
        self.addstr(self.height - 3, 0, coordinates)
def consumer_process_task(processor, log_groups, check_point_tracker):
    """Run one processing pass over *log_groups*.

    :param processor: object whose ``process`` method consumes the log groups
    :param log_groups: log groups to process
    :param check_point_tracker: checkpoint tracker, flushed after processing
    :return: ``TaskResult`` wrapping the exception on failure, otherwise
        ``ProcessTaskResult`` wrapping the returned checkpoint.
    """
    try:
        check_point = processor.process(log_groups, check_point_tracker)
        check_point_tracker.flush_check()
    except Exception as exc:
        return TaskResult(exc)
    else:
        return ProcessTaskResult(check_point)
def _setup(self):
    """Perform initial setup of the settings class, such as getting the
    settings module and setting the settings.

    Reads the settings module name from the environment variable named by
    ``global_settings.MODULE_VARIABLE``, then initializes the settings and
    configures logging.
    """
    settings_module = None
    # Get the settings module from the environment variables
    try:
        settings_module = os.environ[global_settings.MODULE_VARIABLE]
    except KeyError:
        # A missing variable is logged (with traceback, via log.exception)
        # but not fatal here: _initialize is still called with None --
        # presumably it raises or falls back; confirm in _initialize.
        error_message = "Settings not properly configured. Cannot find the environment variable {0}".format(global_settings.MODULE_VARIABLE)
        log.exception(error_message)
    self._initialize(settings_module)
    self._configure_logging()
def __field_to_parameter_type_and_format(self, field):
    """Convert the field variant type into a tuple describing the parameter.

    Args:
        field: An instance of a subclass of messages.Field.

    Returns:
        A tuple with the type and format of the field, respectively.

    Raises:
        TypeError: if the field variant is a message variant.
    """
    field_variant = field.variant
    if field_variant == messages.Variant.MESSAGE:
        raise TypeError("A message variant can't be used in a parameter.")
    # Lowercase values are used for types (e.g. 'string' instead of
    # 'STRING'). The 64-bit integers are marked as strings to accommodate
    # JavaScript, which would otherwise demote them to 32-bit integers.
    custom_variant_map = {
        messages.Variant.DOUBLE: ('number', 'double'),
        messages.Variant.FLOAT: ('number', 'float'),
        messages.Variant.INT64: ('string', 'int64'),
        messages.Variant.SINT64: ('string', 'int64'),
        messages.Variant.UINT64: ('string', 'uint64'),
        messages.Variant.INT32: ('integer', 'int32'),
        messages.Variant.SINT32: ('integer', 'int32'),
        messages.Variant.UINT32: ('integer', 'uint32'),
        messages.Variant.BOOL: ('boolean', None),
        messages.Variant.STRING: ('string', None),
        messages.Variant.BYTES: ('string', 'byte'),
        messages.Variant.ENUM: ('string', None),
    }
    if field_variant in custom_variant_map:
        return custom_variant_map[field_variant]
    return (field_variant.name.lower(), None)
def _create_subviews(path, corpus):
    """Create the 'train', 'dev' and 'test' subviews of *corpus* based on
    ``testing_list.txt`` and ``validation_list.txt`` found under *path*.
    """
    test_entries = textfile.read_separated_lines(
        os.path.join(path, 'testing_list.txt'), separator='/', max_columns=2)
    dev_entries = textfile.read_separated_lines(
        os.path.join(path, 'validation_list.txt'), separator='/', max_columns=2)

    def entries_to_idx_set(entries):
        # Utterance index is "<file-basename>_<first-column>".
        return set(['{}_{}'.format(os.path.splitext(entry[1])[0], entry[0]) for entry in entries])

    test_set = entries_to_idx_set(test_entries)
    dev_set = entries_to_idx_set(dev_entries)

    # The train view is everything that is in neither dev nor test.
    subview_specs = [
        ('train', subview.MatchingUtteranceIdxFilter(utterance_idxs=test_set.union(dev_set), inverse=True)),
        ('dev', subview.MatchingUtteranceIdxFilter(utterance_idxs=dev_set, inverse=False)),
        ('test', subview.MatchingUtteranceIdxFilter(utterance_idxs=test_set, inverse=False)),
    ]
    for name, criteria in subview_specs:
        corpus.import_subview(name, subview.Subview(corpus, filter_criteria=criteria))
def parseValue(self, value):
    """Parse *value* into this option's Python type.

    Vector options are parsed as comma-separated lists; boolean options go
    through ``_parseBool``; everything else is converted directly.
    """
    if self.isVector():
        return [self._pythonType(item) for item in value.split(',')]
    if self.typ == 'boolean':
        return _parseBool(value)
    return self._pythonType(value)
def prepare_task(self, items):
    """Prepare scenario for impact function variable.

    :param items: Dictionary containing settings for impact function
        (keys: 'hazard', 'exposure', optionally 'aggregation', 'extent',
        'extent_crs').
    :type items: dict

    :return: A tuple of (True, parameters-dict) on success, or
        (False, None) after showing an error message box when the hazard or
        exposure layer could not be loaded.
    :rtype: tuple
    """
    status = True
    message = ''

    # get hazard
    if 'hazard' in items:
        hazard_path = items['hazard']
        hazard = self.define_layer(hazard_path)
        if not hazard:
            status = False
            message = self.tr('Unable to find {hazard_path}').format(hazard_path=hazard_path)
    else:
        hazard = None
        LOGGER.warning('Scenario does not contain hazard path')

    # get exposure
    if 'exposure' in items:
        exposure_path = items['exposure']
        exposure = self.define_layer(exposure_path)
        if not exposure:
            status = False
            if message:
                message += '\n'
            message += self.tr('Unable to find {exposure_path}').format(exposure_path=exposure_path)
    else:
        exposure = None
        # Bug fix: this branch previously logged "hazard path" by mistake.
        LOGGER.warning('Scenario does not contain exposure path')

    # get aggregation (optional)
    if 'aggregation' in items:
        aggregation_path = items['aggregation']
        aggregation = self.define_layer(aggregation_path)
    else:
        aggregation = None
        LOGGER.info('Scenario does not contain aggregation path')

    # get extent (optional)
    if 'extent' in items:
        LOGGER.info('Extent coordinate is found')
        coordinates = items['extent']
        array_coord = extent_string_to_array(coordinates)
        extent = QgsRectangle(*array_coord)
    else:
        extent = None
        LOGGER.info('Scenario does not contain extent coordinates')

    # get extent crs id, defaulting to EPSG:4326
    if 'extent_crs' in items:
        LOGGER.info('Extent CRS is found')
        crs = items['extent_crs']
        extent_crs = QgsCoordinateReferenceSystem(crs)
    else:
        LOGGER.info('Extent crs is not found, assuming crs to EPSG:4326')
        extent_crs = QgsCoordinateReferenceSystem('EPSG:4326')

    # make sure at least hazard and exposure data are available in
    # scenario. Aggregation and extent checking will be done when
    # assigning layer to impact_function
    if status:
        parameters = {
            layer_purpose_hazard['key']: hazard,
            layer_purpose_exposure['key']: exposure,
            layer_purpose_aggregation['key']: aggregation,
            'extent': extent,
            'crs': extent_crs,
        }
        return True, parameters
    else:
        LOGGER.warning(message)
        display_critical_message_box(
            title=self.tr('Error while preparing scenario'), message=message)
        return False, None
def l2_norm(params):
    """Flatten *params* into a single vector and return its dot product
    with itself (i.e. the squared Euclidean norm)."""
    flat_vector, _ = flatten(params)
    return np.dot(flat_vector, flat_vector)
def _data_in_prefetch_buffers ( self , offset ) :
"""if a block of data is present in the prefetch buffers , at the given
offset , return the offset of the relevant prefetch buffer . otherwise ,
return None . this guarantees nothing about the number of bytes
collected in the prefetch buffer so far .""" | k = [ i for i in self . _prefetch_data . keys ( ) if i <= offset ]
if len ( k ) == 0 :
return None
index = max ( k )
buf_offset = offset - index
if buf_offset >= len ( self . _prefetch_data [ index ] ) : # it ' s not here
return None
return index |
def start_raylet(redis_address, node_ip_address, raylet_name, plasma_store_name, worker_path, temp_dir, num_cpus=None, num_gpus=None, resources=None, object_manager_port=None, node_manager_port=None, redis_password=None, use_valgrind=False, use_profiler=False, stdout_file=None, stderr_file=None, config=None, include_java=False, java_worker_options=None, load_code_from_local=False):
    """Start a raylet, which is a combined local scheduler and object manager.

    Args:
        redis_address (str): The address of the primary Redis server.
        node_ip_address (str): The IP address of this node.
        raylet_name (str): The name of the raylet socket to create.
        plasma_store_name (str): The name of the plasma store socket to
            connect to.
        worker_path (str): The path of the Python file that new worker
            processes will execute.
        temp_dir (str): The path of the temporary directory Ray will use.
        num_cpus: The CPUs allocated for this raylet.
        num_gpus: The GPUs allocated for this raylet.
        resources: The custom resources allocated for this raylet.
        object_manager_port: The port to use for the object manager. If this
            is None, then the object manager will choose its own port.
        node_manager_port: The port to use for the node manager. If this is
            None, then the node manager will choose its own port.
        redis_password: The password to use when connecting to Redis.
        use_valgrind (bool): True if the raylet should be started inside
            of valgrind. If this is True, use_profiler must be False.
        use_profiler (bool): True if the raylet should be started inside
            a profiler. If this is True, use_valgrind must be False.
        stdout_file: A file handle opened for writing to redirect stdout to.
            If no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr to.
            If no redirection should happen, then this should be None.
        config (dict|None): Optional Raylet configuration that will
            override defaults in RayConfig.
        include_java (bool): If True, the raylet backend can also support
            Java workers.
        java_worker_options (str): The command options for Java workers.
        load_code_from_local (bool): If True, workers load code from the
            local filesystem (flag forwarded to the worker command line).

    Returns:
        ProcessInfo for the process that was started.
    """
    config = config or {}
    # Serialize the config dict as "k1,v1,k2,v2,..." for --config_list.
    config_str = ",".join(["{},{}".format(*kv) for kv in config.items()])
    if use_valgrind and use_profiler:
        raise Exception("Cannot use valgrind and profiler at the same time.")
    num_initial_workers = (num_cpus if num_cpus is not None else multiprocessing.cpu_count())
    static_resources = check_and_update_resources(num_cpus, num_gpus, resources)
    # Limit the number of workers that can be started in parallel by the
    # raylet. However, make sure it is at least 1.
    num_cpus_static = static_resources.get("CPU", 0)
    maximum_startup_concurrency = max(1, min(multiprocessing.cpu_count(), num_cpus_static))
    # Format the resource argument in a form like 'CPU,1.0,GPU,0,Custom,3'.
    resource_argument = ",".join(["{},{}".format(*kv) for kv in static_resources.items()])
    gcs_ip_address, gcs_port = redis_address.split(":")
    if include_java is True:
        java_worker_options = (java_worker_options or DEFAULT_JAVA_WORKER_OPTIONS)
        java_worker_command = build_java_worker_command(java_worker_options, redis_address, plasma_store_name, raylet_name, redis_password, os.path.join(temp_dir, "sockets"), )
    else:
        java_worker_command = ""
    # Create the command that the Raylet will use to start workers.
    start_worker_command = ("{} {} "
                            "--node-ip-address={} "
                            "--object-store-name={} "
                            "--raylet-name={} "
                            "--redis-address={} "
                            "--temp-dir={}".format(sys.executable, worker_path, node_ip_address, plasma_store_name, raylet_name, redis_address, temp_dir))
    if redis_password:
        start_worker_command += " --redis-password {}".format(redis_password)
    # If the object manager port is None, then use 0 to cause the object
    # manager to choose its own port.
    if object_manager_port is None:
        object_manager_port = 0
    # If the node manager port is None, then use 0 to cause the node manager
    # to choose its own port.
    if node_manager_port is None:
        node_manager_port = 0
    if load_code_from_local:
        start_worker_command += " --load-code-from-local "
    # Assemble the raylet executable invocation; each flag below mirrors one
    # of the values computed above.
    command = [
        RAYLET_EXECUTABLE,
        "--raylet_socket_name={}".format(raylet_name),
        "--store_socket_name={}".format(plasma_store_name),
        "--object_manager_port={}".format(object_manager_port),
        "--node_manager_port={}".format(node_manager_port),
        "--node_ip_address={}".format(node_ip_address),
        "--redis_address={}".format(gcs_ip_address),
        "--redis_port={}".format(gcs_port),
        "--num_initial_workers={}".format(num_initial_workers),
        "--maximum_startup_concurrency={}".format(maximum_startup_concurrency),
        "--static_resource_list={}".format(resource_argument),
        "--config_list={}".format(config_str),
        "--python_worker_command={}".format(start_worker_command),
        "--java_worker_command={}".format(java_worker_command),
        "--redis_password={}".format(redis_password or ""),
        "--temp_dir={}".format(temp_dir),
    ]
    process_info = start_ray_process(command, ray_constants.PROCESS_TYPE_RAYLET, use_valgrind=use_valgrind, use_gdb=False, use_valgrind_profiler=use_profiler, use_perftools_profiler=("RAYLET_PERFTOOLS_PATH" in os.environ), stdout_file=stdout_file, stderr_file=stderr_file)
    return process_info
def wait_for_import(self, connection_id, wait_interval):
    """Block until the connection state is no longer ``IMPORT_CONFIGURATION``.

    Args:
        connection_id (str): Heroku Connect connection to monitor.
        wait_interval (int): How frequently to poll in seconds.

    Raises:
        CommandError: If fetching connection information fails.
    """
    self.stdout.write(self.style.NOTICE('Waiting for import'), ending='')
    state = utils.ConnectionStates.IMPORT_CONFIGURATION
    # Right after triggering an import the API can lag behind, so keep
    # polling (one dot per attempt) until the state moves on.
    while state == utils.ConnectionStates.IMPORT_CONFIGURATION:
        self.stdout.write(self.style.NOTICE('.'), ending='')
        time.sleep(wait_interval)  # take a breath
        try:
            connection = utils.get_connection(connection_id)
        except requests.HTTPError as e:
            raise CommandError("Failed to fetch connection information.") from e
        state = connection['state']
    self.stdout.write(self.style.NOTICE(' Done!'))
def type(self):
    """Certificate type.

    :return: The type of the certificate.
    :rtype: CertificateType
    """
    # Device mode 1 always reports a developer certificate, as does an
    # explicit developer type.
    if self._device_mode == 1 or self._type == CertificateType.developer:
        return CertificateType.developer
    if self._type == CertificateType.bootstrap:
        return CertificateType.bootstrap
    return CertificateType.lwm2m
def bookmark_list():
    """Executor for `globus bookmark list`"""
    client = get_client()

    def resolve_endpoint_name(item):
        # Look up the endpoint's display name; a deleted endpoint raises a
        # TransferAPIError with a dedicated code, which we render as a
        # placeholder instead of failing the whole listing.
        try:
            endpoint_doc = client.get_endpoint(item["endpoint_id"])
        except TransferAPIError as err:
            if err.code == "EndpointDeleted":
                return "[DELETED ENDPOINT]"
            raise err
        return display_name_or_cname(endpoint_doc)

    formatted_print(
        client.bookmark_list(),
        fields=[
            ("Name", "name"),
            ("Bookmark ID", "id"),
            ("Endpoint ID", "endpoint_id"),
            ("Endpoint Name", resolve_endpoint_name),
            ("Path", "path"),
        ],
        response_key="DATA",
        json_converter=iterable_response_to_dict,
    )
def refine(video, episode_refiners=None, movie_refiners=None, **kwargs):
    """Refine a video using :ref:`refiners`.

    .. note::
        Exceptions raised in refiners are silently passed and logged.

    :param video: the video to refine.
    :type video: :class:`~subliminal.video.Video`
    :param tuple episode_refiners: refiners to use for episodes.
    :param tuple movie_refiners: refiners to use for movies.
    :param \\*\\*kwargs: additional parameters for the
        :func:`~subliminal.refiners.refine` functions.
    """
    refiners = ()
    if isinstance(video, Episode):
        refiners = episode_refiners or ('metadata', 'tvdb', 'omdb')
    elif isinstance(video, Movie):
        refiners = movie_refiners or ('metadata', 'omdb')
    for refiner in refiners:
        logger.info('Refining video with %s', refiner)
        try:
            refiner_manager[refiner].plugin(video, **kwargs)
        except Exception:
            # Bug fix: was a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt. Refiner errors are still
            # logged and suppressed, as the docstring promises.
            logger.exception('Failed to refine video')
def listFiles(self, dataset="", block_name="", logical_file_name="", release_version="", pset_hash="", app_name="", output_module_label="", run_num=-1, origin_site_name="", lumi_list="", detail=False, validFileOnly=0, sumOverLumi=0):
    """API to list files in DBS.

    Either a non-wildcarded logical_file_name, dataset or block_name is
    required. A non-wildcarded dataset or block_name may be combined with a
    wildcarded logical_file_name.

    * lumi_list supports two JSON formats: [a1, a2, a3, ...] or
      [[a, b], [c, d], ...]; they cannot be mixed.
    * If lumi_list is provided, only run_num=<single-run-number> is allowed.
    * When an lfn list is present, no run or lumi list is allowed.
    * Dataset access types are VALID, INVALID, PRODUCTION, DEPRECATED and
      DELETED; file status is IS_FILE_VALID: 1 or 0. When a dataset is
      INVALID/DEPRECATED/DELETED, DBS considers all files under it invalid
      regardless of is_file_valid (though some old DBS2 data was not marked
      that way); when VALID/PRODUCTION, is_file_valid defaults to 1 unless
      an individual file was invalidated.

    :param dataset: dataset (str)
    :param block_name: block name (str)
    :param logical_file_name: logical_file_name of the file (str)
    :param release_version: release version (str)
    :param pset_hash: parameter set hash (str)
    :param app_name: name of the application (str)
    :param output_module_label: name of the used output module (str)
    :param run_num: run, run ranges, or run list. Possible formats:
        run_num, 'run_min-run_max' or ['run_min-run_max', run1, run2, ...]
    :param origin_site_name: site where the file was created (str)
    :param lumi_list: list containing luminosity sections
    :param detail: get detailed information about a file (bool)
    :param validFileOnly: default=0 return all files; when =1, only return
        files with is_file_valid=1 or dataset_access_type PRODUCTION/VALID
    :param sumOverLumi: default=0, event_count is per file. When =1 and a
        single run_num is given, event_count is the sum of per-lumi counts
        for that run; no other input may then be a list.
    :returns: generator of dicts keyed on logical_file_name (many more keys
        when detail is true)
    """
    # DBS uses SQL LIKE syntax internally: translate shell-style wildcards.
    logical_file_name = logical_file_name.replace("*", "%")
    release_version = release_version.replace("*", "%")
    pset_hash = pset_hash.replace("*", "%")
    app_name = app_name.replace("*", "%")
    block_name = block_name.replace("*", "%")
    origin_site_name = origin_site_name.replace("*", "%")
    dataset = dataset.replace("*", "%")
    # run_num=1 caused full table scans (CERN DBS saw queries running >50h),
    # so all run_num=1 calls are disabled unless a logical_file_name is
    # given. YG Jan. 15 2019
    if (run_num != -1 and logical_file_name == ''):
        for r in parseRunRange(run_num):
            # NOTE: basestring/long are Python 2 builtins; this module
            # targets Python 2.
            if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long):
                if r == 1 or r == '1':
                    dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input.", self.logger.exception)
            elif isinstance(r, run_tuple):
                if r[0] == r[1]:
                    dbsExceptionHandler("dbsException-invalid-input", "DBS run range must be apart at least by 1.", self.logger.exception)
                elif r[0] <= 1 <= r[1]:
                    dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input.", self.logger.exception)
    if lumi_list:
        # A lumi list requires exactly one run and is incompatible with
        # sumOverLumi (Oracle cannot nest WITH clauses).
        if run_num == -1 or not run_num:
            dbsExceptionHandler("dbsException-invalid-input", "When lumi_list is given, require a single run_num.", self.logger.exception)
        elif sumOverLumi == 1:
            dbsExceptionHandler("dbsException-invalid-input", "lumi_list and sumOverLumi=1 cannot be set at the same time becaue nesting of WITH clause within WITH clause not supported yet by Oracle. ", self.logger.exception)
        else:
            try:
                lumi_list = self.dbsUtils2.decodeLumiIntervals(lumi_list)
            except Exception as de:
                dbsExceptionHandler("dbsException-invalid-input", "Invalid lumi_list input: " + str(de), self.logger.exception)
    else:
        # Without a lumi list, run_num=1 is rejected whether given as a
        # scalar or inside a list.
        if not isinstance(run_num, list):
            if run_num == 1 or run_num == '1':
                dbsExceptionHandler("dbsException-invalid-input", "files API does not supprt run_num=1 when no lumi.", self.logger.exception)
        else:
            if 1 in run_num or '1' in run_num:
                dbsExceptionHandler("dbsException-invalid-input", "files API does not supprt run_num=1 when no lumi.", self.logger.exception)
    if int(sumOverLumi) == 1 and (isinstance(run_num, list) or isinstance(logical_file_name, list)):
        dbsExceptionHandler("dbsException-invalid-input", "When sumOverLumi=1, no lfn list or run_num list allowed becaue nesting of WITH clause within WITH clause not supported yet by Oracle. ", self.logger.exception)
    # Normalize the various truthy encodings of detail to a real bool.
    detail = detail in (True, 1, "True", "1", 'true')
    output_module_label = output_module_label.replace("*", "%")
    try:
        result = self.dbsFile.listFiles(dataset, block_name, logical_file_name, release_version, pset_hash, app_name, output_module_label, run_num, origin_site_name, lumi_list, detail, validFileOnly, sumOverLumi)
        for item in result:
            yield item
    except HTTPError as he:
        raise he
    except dbsException as de:
        dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
    except Exception as ex:
        sError = "DBSReaderModel/listFiles. %s \n Exception trace: \n %s" % (ex, traceback.format_exc())
        dbsExceptionHandler('dbsException-server-error', ex.message, self.logger.exception, sError)
def before_first_request(self, fn):
    """Register a function to run before the first request to this
    instance of the application.

    The function will be called without any arguments and its return value
    is ignored. Returns *fn* unchanged so this works as a decorator.
    """
    def register(app):
        # Deferred until the real application object exists.
        app.before_first_request(fn)
    self._defer(register)
    return fn
def setStartAction(self, action, *args, **kwargs):
    """Set a function to call when run() is called, before the main action.

    Parameters
    ----------
    action : callable
        The function to call.
    *args
        Positional arguments to pass to ``action``.
    **kwargs
        Keyword arguments to pass to ``action``.
    """
    self.init_action, self.init_args, self.init_kwargs = action, args, kwargs
def on_btn_demag_gui(self, event):
    """Open Demag GUI.

    Launches demag_gui either via ``demag_gui.start`` (data model 2) or as
    a child frame of this window (any other data model). Requires a
    measurement file and no uncombined files; otherwise returns early.
    """
    # Bail out unless the prerequisite files are in place.
    if not self.check_for_meas_file():
        return
    if not self.check_for_uncombined_files():
        return
    outstring = "demag_gui.py -WD %s" % self.WD
    print("-I- running python script:\n %s" % (outstring))
    if self.data_model_num == 2:
        demag_gui.start(self.WD, standalone_app=False, parent=self, DM=self.data_model_num)
    else:
        # disable and hide Pmag GUI mainframe
        self.Disable()
        self.Hide()
        # show busyinfo while Demag GUI compiles its data
        wait = wx.BusyInfo('Compiling required data, please wait...')
        wx.SafeYield()
        # create custom Demag GUI closing event and bind it
        DemagGuiExitEvent, EVT_DEMAG_GUI_EXIT = newevent.NewCommandEvent()
        self.Bind(EVT_DEMAG_GUI_EXIT, self.on_analysis_gui_exit)
        # make and show the Demag GUI frame
        demag_gui_frame = demag_gui.Demag_GUI(self.WD, self, write_to_log_file=False, data_model=self.data_model_num, evt_quit=DemagGuiExitEvent)
        demag_gui_frame.Centre()
        demag_gui_frame.Show()
        # deleting the BusyInfo dismisses the busy indicator
        del wait
def yaw(self):
    """Return the yaw angle (in radians) computed from this quaternion as
    ``asin(2*x*y + 2*z*w)``."""
    # Keep the original operation order so floating-point results are
    # bit-identical.
    return math.asin(2 * self.x * self.y + 2 * self.z * self.w)
def convert_data_to_dtype(data, data_type, mot_float_type='float'):
    """Convert the given input data to the correct numpy type.

    Args:
        data (ndarray): The value to convert to the correct numpy type
        data_type (str): the data type we need to convert the data to
        mot_float_type (str): the data type of the current ``mot_float_type``

    Returns:
        ndarray: the input data converted to the desired numpy data type
    """
    scalar_dtype = ctype_to_dtype(data_type, mot_float_type)
    if isinstance(data, numbers.Number):
        # promote a plain Python number to a numpy scalar of the target type
        data = scalar_dtype(data)
    if is_vector_ctype(data_type):
        # Vector ctypes: the last axis of ``data`` holds the vector
        # components; the output drops that axis and packs the components
        # into the vector dtype element-by-element.
        shape = data.shape
        dtype = ctype_to_dtype(data_type, mot_float_type)
        ve = np.zeros(shape[:-1], dtype=dtype)
        if len(shape) == 1:
            # NOTE(review): for 1-D input ``ve`` is built from shape[:-1]
            # (i.e. 0-d) yet ``ve[0]`` is indexed below -- this seems to
            # rely on ``dtype`` carrying a sub-array shape; confirm against
            # ctype_to_dtype before changing.
            for vector_ind in range(shape[0]):
                ve[0][vector_ind] = data[vector_ind]
        elif len(shape) == 2:
            for i in range(data.shape[0]):
                for vector_ind in range(data.shape[1]):
                    ve[i][vector_ind] = data[i, vector_ind]
        elif len(shape) == 3:
            for i in range(data.shape[0]):
                for j in range(data.shape[1]):
                    for vector_ind in range(data.shape[2]):
                        ve[i, j][vector_ind] = data[i, j, vector_ind]
        # ensure a C-contiguous, aligned, writable result
        return np.require(ve, requirements=['C', 'A', 'O'])
    return np.require(data, scalar_dtype, ['C', 'A', 'O'])
def quantile_normalize(matrix, inplace=False, target=None):
    """Quantile normalization, allowing for missing values (NaN).

    In case of NaN values, this implementation will calculate evenly
    distributed quantiles and fill in the missing data with those values.
    Quantile normalization is then performed on the filled-in matrix,
    and the NaN values are restored afterwards.

    Parameters
    ----------
    matrix : `ExpMatrix`
        The expression matrix (rows = genes, columns = samples).
    inplace : bool
        Whether or not to perform the operation in-place. [False]
    target : `numpy.ndarray`
        Target distribution to use. Needs to be a vector whose first
        dimension matches that of the expression matrix. If ``None``,
        the target distribution is calculated based on the matrix
        itself. [None]

    Returns
    -------
    `ExpMatrix`
        The normalized matrix.
    """
    assert isinstance(matrix, ExpMatrix)
    assert isinstance(inplace, bool)
    if target is not None:
        # Bug fix: ``np.float`` (a deprecated alias for the builtin float)
        # was removed in NumPy 1.24; ``np.floating`` is the correct abstract
        # type for "any floating-point dtype" here.
        assert isinstance(target, np.ndarray) and np.issubdtype(target.dtype, np.floating)
    if not inplace:
        # make a copy of the original data
        matrix = matrix.copy()
    X = matrix.X
    _, n = X.shape
    nan = []
    # fill in missing values with evenly spaced quantiles
    for j in range(n):
        nan.append(np.nonzero(np.isnan(X[:, j]))[0])
        if nan[j].size > 0:
            q = np.arange(1, nan[j].size + 1, dtype=np.float64) / (nan[j].size + 1.0)
            fill = np.nanpercentile(X[:, j], 100 * q)
            X[nan[j], j] = fill
    # generate sorting indices (mergesort is stable)
    A = np.argsort(X, axis=0, kind='mergesort')
    # reorder matrix
    for j in range(n):
        matrix.iloc[:, j] = matrix.X[A[:, j], j]
    # determine target distribution
    if target is None:
        # No target distribution is specified, calculate one based on the
        # expression matrix.
        target = np.mean(matrix.X, axis=1)
    else:
        # Use specified target distribution (after sorting).
        target = np.sort(target)
    # generate indices to reverse sorting (mergesort is stable)
    A = np.argsort(A, axis=0, kind='mergesort')
    # quantile-normalize
    for j in range(n):
        matrix.iloc[:, j] = target[A[:, j]]
    # set missing values to NaN again
    for j in range(n):
        if nan[j].size > 0:
            matrix.iloc[nan[j], j] = np.nan
    return matrix
def _apply_scope ( self , scope , builder ) :
"""Apply a single scope on the given builder instance .
: param scope : The scope to apply
: type scope : callable or Scope
: param builder : The builder to apply the scope to
: type builder : Builder""" | if callable ( scope ) :
scope ( builder )
elif isinstance ( scope , Scope ) :
scope . apply ( builder , self . get_model ( ) ) |
def are_none(sequences: Sequence[Sized]) -> bool:
    """Returns True if all sequences are None."""
    # A falsy container (None or empty) vacuously satisfies the condition;
    # otherwise every element must literally be None.
    return (not sequences) or all(item is None for item in sequences)
def build_server_from_config(config, section_name, server_klass=None, handler_klass=None):
    """Build a server from a provided :py:class:`configparser.ConfigParser`
    instance. If a ServerClass or HandlerClass is specified, then the
    object must inherit from the corresponding AdvancedHTTPServer base
    class.

    :param config: Configuration to retrieve settings from.
    :type config: :py:class:`configparser.ConfigParser`
    :param str section_name: The section name of the configuration to use.
    :param server_klass: Alternative server class to use.
    :type server_klass: :py:class:`.AdvancedHTTPServer`
    :param handler_klass: Alternative handler class to use.
    :type handler_klass: :py:class:`.RequestHandler`
    :return: A configured server instance.
    :rtype: :py:class:`.AdvancedHTTPServer`
    """
    # Fall back to the default server / handler classes when none are given.
    server_klass = (server_klass or AdvancedHTTPServer)
    handler_klass = (handler_klass or RequestHandler)
    # 'port' is the only option read unconditionally; everything else has
    # a fallback or is optional.
    port = config.getint(section_name, 'port')
    web_root = None
    if config.has_option(section_name, 'web_root'):
        web_root = config.get(section_name, 'web_root')
    if config.has_option(section_name, 'ip'):
        ip = config.get(section_name, 'ip')
    else:
        ip = '0.0.0.0'  # bind to all interfaces by default
    # Optional SSL settings; None values are passed through to the server.
    ssl_certfile = None
    if config.has_option(section_name, 'ssl_cert'):
        ssl_certfile = config.get(section_name, 'ssl_cert')
    ssl_keyfile = None
    if config.has_option(section_name, 'ssl_key'):
        ssl_keyfile = config.get(section_name, 'ssl_key')
    ssl_version = None
    if config.has_option(section_name, 'ssl_version'):
        ssl_version = config.get(section_name, 'ssl_version')
    server = server_klass(handler_klass, address=(ip, port), ssl_certfile=ssl_certfile, ssl_keyfile=ssl_keyfile, ssl_version=ssl_version)
    # Hash type used to store the credentials ('md5' when unspecified).
    if config.has_option(section_name, 'password_type'):
        password_type = config.get(section_name, 'password_type')
    else:
        password_type = 'md5'
    # Single credential pair: 'password' with an optional 'username'.
    if config.has_option(section_name, 'password'):
        password = config.get(section_name, 'password')
        if config.has_option(section_name, 'username'):
            username = config.get(section_name, 'username')
        else:
            username = ''  # blank username is permitted
        server.auth_add_creds(username, password, pwtype=password_type)
    # Additional numbered pairs: password0/username0, password1/username1, ...
    cred_idx = 0
    while config.has_option(section_name, 'password' + str(cred_idx)):
        password = config.get(section_name, 'password' + str(cred_idx))
        if not config.has_option(section_name, 'username' + str(cred_idx)):
            break  # a passwordN without a matching usernameN stops the scan
        username = config.get(section_name, 'username' + str(cred_idx))
        server.auth_add_creds(username, password, pwtype=password_type)
        cred_idx += 1
    # Static file serving is only enabled when a web root is configured.
    if web_root is None:
        server.serve_files = False
    else:
        server.serve_files = True
        server.serve_files_root = web_root
        if config.has_option(section_name, 'list_directories'):
            server.serve_files_list_directories = config.getboolean(section_name, 'list_directories')
    return server
def PlaceSOffsetT(self, x):
    """PlaceSOffsetT prepends a SOffsetT to the Builder, without checking
    for space.

    :param x: signed offset value; must be representable as an SOffsetT.
    """
    # Validate that x fits in the SOffsetT range before writing.
    N.enforce_number(x, N.SOffsetTFlags)
    # The builder grows downwards: move the head back by the width of an
    # SOffsetT, then write the value at the new head position.
    self.head = self.head - N.SOffsetTFlags.bytewidth
    encode.Write(packer.soffset, self.Bytes, self.Head(), x)
def run(self, cmdline_args=None, program_name="start_service", version=workflows.version(), **kwargs):
    """Example command line interface to start services.

    :param cmdline_args: List of command line arguments to pass to parser
    :param program_name: Name of the command line tool to display in help
    :param version: Version number to print when run with '--version'
    :param kwargs: Extra keyword arguments forwarded to the Frontend
        constructor (after the before_frontend_construction hook).
    """
    # NOTE: the ``version`` default is evaluated once, at function
    # definition time, not per call.
    # Enumerate all known services
    known_services = workflows.services.get_known_services()
    # Set up parser
    parser = OptionParser(usage=program_name + " [options]" if program_name else None, version=version)
    parser.add_option("-?", action="help", help=SUPPRESS_HELP)
    parser.add_option("-s", "--service", dest="service", metavar="SVC", default=None, help="Name of the service to start. Known services: " + ", ".join(known_services),)
    parser.add_option("-t", "--transport", dest="transport", metavar="TRN", default="StompTransport", help="Transport mechanism. Known mechanisms: " + ", ".join(workflows.transport.get_known_transports()) + " (default: %default)",)
    workflows.transport.add_command_line_options(parser)
    # Call on_parser_preparation hook
    parser = self.on_parser_preparation(parser) or parser
    # Parse command line options
    (options, args) = parser.parse_args(cmdline_args)
    # Call on_parsing hook
    (options, args) = self.on_parsing(options, args) or (options, args)
    # Create Transport factory
    transport_factory = workflows.transport.lookup(options.transport)
    # Call on_transport_factory_preparation hook
    transport_factory = (self.on_transport_factory_preparation(transport_factory) or transport_factory)
    # Set up on_transport_preparation hook to affect newly created transport objects.
    # The factory's __call__ is wrapped so every transport object passes
    # through the hook before the frontend ever sees it.
    true_transport_factory_call = transport_factory.__call__

    def on_transport_preparation_hook():
        transport_object = true_transport_factory_call()
        return self.on_transport_preparation(transport_object) or transport_object

    transport_factory.__call__ = on_transport_preparation_hook
    # When service name is specified, check if service exists or can be derived
    if options.service and options.service not in known_services:
        # Accept a unique prefix match: case-sensitive first, then
        # case-insensitive as a fallback.
        matching = [s for s in known_services if s.startswith(options.service)]
        if not matching:
            matching = [s for s in known_services if s.lower().startswith(options.service.lower())]
        if matching and len(matching) == 1:
            options.service = matching[0]
    kwargs.update({"service": options.service, "transport": transport_factory})
    # Call before_frontend_construction hook
    kwargs = self.before_frontend_construction(kwargs) or kwargs
    # Create Frontend object
    frontend = workflows.frontend.Frontend(**kwargs)
    # Call on_frontend_preparation hook
    frontend = self.on_frontend_preparation(frontend) or frontend
    # Start Frontend; Ctrl+C is the expected shutdown path.
    try:
        frontend.run()
    except KeyboardInterrupt:
        print("\nShutdown via Ctrl+C")
def get_taxids(list_of_names):
    """>>> mylist = ['Arabidopsis thaliana', 'Carica papaya']
    >>> get_taxids(mylist)
    [1, 2]
    """
    # Imported lazily so the module does not require the fetch machinery
    # unless taxids are actually requested.
    from jcvi.apps.fetch import batch_taxids
    return list(map(int, batch_taxids(list_of_names)))
def istext(somestr):
    """Checks that some string is a text

    :param str somestr:
        It is some string that will be checked for text.
        The text is string that contains only words or special words such as preposition
        (what is the word and the special word see at help(palindromus.isword) and help(palindromus.isspecword)).
        All words can be divided any special symbols such as punctuation marks.
        The text can be multiline
    :except TypeError:
        If the checked text is not a string
    :return bool:
    """
    # Reject anything that is not a string outright.
    OnlyStringsCanBeChecked(somestr)
    # Split the candidate into word-like tokens.
    tokens = re.findall(r'\w+', somestr.strip(), flags=re.IGNORECASE | re.MULTILINE)
    if not tokens:
        return False
    for token in tokens:
        # Underscores are matched by \w but are never part of real words.
        if '_' in token:
            return False
        if not isspecword(token) and not isword(token):
            return False
    return True
def process_link(self, env, refnode, has_explicit_title, title, target):
    """This handles some special cases for reference links in .NET

    First, the standard Sphinx reference syntax of ``:ref:`Title <Link>```,
    where a reference to ``Link`` is created with title ``Title``, causes
    problems for the generic .NET syntax of ``:dn:cls:`FooBar<T>```. So, here
    we assume that ``<T>`` was the generic declaration, and fix the
    reference.

    This also uses :py:cls:`AnyXRefRole` to add `ref_context` onto the
    refnode. Add data there that you need it on refnodes.

    This method also resolves special reference operators ``~`` and ``.``
    """
    result = super(DotNetXRefRole, self).process_link(env, refnode, has_explicit_title, title, target)
    (title, target) = result
    if not has_explicit_title:
        # Leading dots on the title and leading tildes on the target are
        # reference operators, not part of the displayed name.
        title = title.lstrip('.')
        target = target.lstrip('~')
        # If the first character is a tilde, don't display the parent name
        if title[0:1] == '~':
            title = title[1:]
            dot = title.rfind('.')
            if dot != -1:
                # Keep only the last dotted component as the display title.
                title = title[dot + 1:]
    else:
        if title != target:
            # Explicit title differing from the target: per the docstring,
            # treat <target> as a generic declaration and fold it back in.
            target = title = '{title}<{target}>'.format(title=title, target=target)
    return title, target
def safe_expand(template, mapping):
    """Safe string template expansion. Raises an error if the provided substitution mapping has circularities."""
    # Each pass resolves at least one level of nesting, so a non-circular
    # mapping must reach a fixed point within len(mapping) + 1 passes.
    for _ in range(len(mapping) + 1):
        expanded = string.Template(template).safe_substitute(mapping)
        if expanded == template:
            return expanded
        template = expanded
    raise ValueError("circular mapping provided!")
def greater(lhs, rhs):
    """Returns the result of element-wise **greater than** (>) comparison operation
    with broadcasting.

    For each element in input arrays, return 1(true) if lhs elements are greater than rhs,
    otherwise return 0(false).

    Equivalent to ``lhs > rhs`` and ``mx.nd.broadcast_greater(lhs, rhs)``.

    .. note::

       If the corresponding dimensions of two arrays have the same size or one of them has size 1,
       then the arrays are broadcastable to a common shape.

    Parameters
    ----------
    lhs : scalar or mxnet.ndarray.array
        First array to be compared.
    rhs : scalar or mxnet.ndarray.array
        Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
        broadcastable to a common shape.

    Returns
    -------
    NDArray
        Output array of boolean values.

    Examples
    --------
    >>> x = mx.nd.ones((2,3))
    >>> y = mx.nd.arange(2).reshape((2,1))
    >>> z = mx.nd.arange(2).reshape((1,2))
    >>> x.asnumpy()
    array([[ 1.,  1.,  1.],
           [ 1.,  1.,  1.]], dtype=float32)
    >>> y.asnumpy()
    array([[ 0.],
           [ 1.]], dtype=float32)
    >>> z.asnumpy()
    array([[ 0.,  1.]], dtype=float32)
    >>> (x > 1).asnumpy()
    array([[ 0.,  0.,  0.],
           [ 0.,  0.,  0.]], dtype=float32)
    >>> (x > y).asnumpy()
    array([[ 1.,  1.,  1.],
           [ 0.,  0.,  0.]], dtype=float32)
    >>> mx.nd.greater(x, y).asnumpy()
    array([[ 1.,  1.,  1.],
           [ 0.,  0.,  0.]], dtype=float32)
    >>> (z > y).asnumpy()
    array([[ 0.,  1.],
           [ 0.,  0.]], dtype=float32)
    """
    # pylint: disable=no-member, protected-access
    # The two scalar helpers cover the cases where one operand is a scalar.
    # NOTE(review): presumably _greater_scalar handles a scalar rhs and
    # _lesser_scalar handles a scalar lhs (``scalar > x`` rewritten as
    # ``x < scalar``) -- confirm against _ufunc_helper's argument order.
    return _ufunc_helper(lhs, rhs, op.broadcast_greater, lambda x, y: 1 if x > y else 0, _internal._greater_scalar, _internal._lesser_scalar)
def hatnotes(self):
    """list: Parse hatnotes from the HTML

    Note:
        Not settable

    Note:
        Side effect is to also pull the html which can be slow

    Note:
        This is a parsing operation and not part of the standard API
    """
    # Lazily computed and cached on first access.
    if self._hatnotes is None:
        self._hatnotes = list()
        # Accessing self.html may trigger the (slow) page fetch.
        soup = BeautifulSoup(self.html, "html.parser")
        # Empty tag name: match any element carrying the 'hatnote' class.
        notes = soup.findAll("", {"class": "hatnote"})
        if notes is not None:
            for note in notes:
                tmp = list()
                # Flatten each hatnote to plain text: children with a
                # .text attribute are tags, the rest are raw strings.
                for child in note.children:
                    if hasattr(child, "text"):
                        tmp.append(child.text)
                    else:
                        tmp.append(child)
                self._hatnotes.append("".join(tmp))
    return self._hatnotes
def serialize(self, path):
    """Saves the raw (read unsmoothed) histogram data to the given path using
    pickle python module.

    :param path: destination file path for the pickled ``[x, y_raw]`` pair
    """
    # The original used the Python-2-only ``file()`` builtin, opened in
    # text mode (pickle requires a binary stream on Python 3), and never
    # closed the handle. ``with open(..., 'wb')`` fixes all three.
    with open(path, 'wb') as fh:
        pickle.dump([self.x, self.y_raw], fh)
def multi_to_dict(multi):
    '''Transform a Werkzeug multidictionnary into a flat dictionnary'''
    flat = {}
    # to_dict(False) yields every key with its full list of values;
    # single-valued entries are collapsed to the bare value.
    for key, values in multi.to_dict(False).items():
        flat[key] = values[0] if len(values) == 1 else values
    return flat
def read_td_query(query, engine, index_col=None, parse_dates=None, distributed_join=False, params=None):
    '''Read Treasure Data query into a DataFrame.

    Returns a DataFrame corresponding to the result set of the query string.
    Optionally provide an index_col parameter to use one of the columns as
    the index, otherwise default integer index will be used.

    Parameters
    ----------
    query : string
        Query string to be executed.
    engine : QueryEngine
        Handler returned by create_engine.
    index_col : string, optional
        Column name to use as index for the returned DataFrame object.
    parse_dates : list or dict, optional
        - List of column names to parse as dates
        - Dict of {column_name: format string} where format string is strftime
          compatible in case of parsing string times or is one of (D, s, ns, ms, us)
          in case of parsing integer timestamps
    distributed_join : boolean, default False
        (Presto only) If True, distributed join is enabled. If False, broadcast join is used.
        See https://prestodb.io/docs/current/release/release-0.77.html
    params : dict, optional
        Parameters to pass to execute method.
        Available parameters:
        - result_url (str): result output URL
        - priority (int or str): priority (e.g. "NORMAL", "HIGH", etc.)
        - retry_limit (int): retry limit

    Returns
    -------
    DataFrame
    '''
    if params is None:
        params = {}
    # Build the query header; for Presto, pin the join strategy explicitly.
    header = engine.create_header("read_td_query")
    if engine.type == 'presto' and distributed_join is not None:
        join_mode = 'true' if distributed_join else 'false'
        header += "-- set session distributed_join = '{0}'\n".format(join_mode)
    # Execute and convert the result set into a DataFrame.
    result = engine.execute(header + query, **params)
    return result.to_dataframe(index_col=index_col, parse_dates=parse_dates)
def height(self):
    """Returns height of the (sub)tree, without considering
    empty leaf-nodes

    >>> create(dimensions=2).height()
    >>> create([ (1, 2) ]).height()
    >>> create([ (1, 2), (2, 3) ]).height()
    """
    # A non-empty node contributes 1 to the height, an empty one 0;
    # each child subtree adds one level on top of its own height.
    candidates = [1 if self else 0]
    for child, _position in self.children:
        candidates.append(child.height() + 1)
    return max(candidates)
def radec2azel(ra_deg: float, dec_deg: float, lat_deg: float, lon_deg: float, time: datetime) -> Tuple[float, float]:
    """converts right ascension, declination to azimuth, elevation

    Parameters
    ----------
    ra_deg : float or numpy.ndarray of float
        right ascension to target [degrees]
    dec_deg : float or numpy.ndarray of float
        declination to target [degrees]
    lat_deg : float
        observer WGS84 latitude [degrees]
    lon_deg : float
        observer WGS84 longitude [degrees]
    time : datetime.datetime
        time of observation

    Results
    -------
    az_deg : float or numpy.ndarray of float
        azimuth clockwise from north to point [degrees]
    el_deg : float or numpy.ndarray of float
        elevation above horizon to point [degrees]

    from D. Vallado "Fundamentals of Astrodynamics and Applications"
    4th Edition Ch. 4.4 pg. 266-268
    """
    # Promote scalars to 1-D arrays so the vector math below is uniform.
    ra = atleast_1d(ra_deg)
    dec = atleast_1d(dec_deg)
    lat = atleast_1d(lat_deg)
    lon = atleast_1d(lon_deg)
    if ra.shape != dec.shape:
        raise ValueError('az and el must be same shape ndarray')
    if not (lat.size == 1 and lon.size == 1):
        raise ValueError('need one observer and one or more (az,el).')
    if ((lat < -90) | (lat > 90)).any():
        raise ValueError('-90 <= lat <= 90')
    # All trigonometry below operates in radians.
    ra = radians(ra)
    dec = radians(dec)
    lat = radians(lat)
    lon = radians(lon)
    # Local sidereal time of the observer, in radians.
    lst = datetime2sidereal(time, lon)
    # RADIANS
    # %% Eq. 4-11 p. 267 LOCAL HOUR ANGLE
    lha = lst - ra
    # %% #Eq. 4-12 p. 267
    el = arcsin(sin(lat) * sin(dec) + cos(lat) * cos(dec) * cos(lha))
    # %% combine Eq. 4-13 and 4-14 p. 268
    az = arctan2(-sin(lha) * cos(dec) / cos(el), (sin(dec) - sin(el) * sin(lat)) / (cos(el) * cos(lat)))
    # Wrap azimuth into [0, 360) degrees; elevation is returned as-is.
    return degrees(az) % 360.0, degrees(el)
def get_std_dev_area(self, mag, rake):
    """Standard deviation for WC1994. Magnitude is ignored."""
    assert rake is None or -180 <= rake <= 180
    if rake is None:
        # their "All" case
        return 0.24
    if (-45 <= rake <= 45) or rake >= 135 or rake <= -135:
        # strike slip
        return 0.22
    if rake > 0:
        # thrust/reverse
        return 0.26
    # normal
    return 0.22
def par_compute_residuals(i):
    """Compute components of the residual and stopping thresholds that
    can be done in parallel.

    Parameters
    ----------
    i : int
        Index of group to compute
    """
    # Compute the residuals in parallel, need to check if the residuals
    # depend on alpha
    # The mp_* arrays are module-level state -- presumably shared with
    # multiprocessing workers (TODO confirm); each call writes only
    # slot/slice ``i``, so concurrent calls for distinct i do not overlap.
    global mp_ry0
    global mp_ry1
    global mp_sy0
    global mp_sy1
    global mp_nrmAx
    global mp_nrmBy
    global mp_nrmu
    # Primal residual components (squared norms) for group i.
    mp_ry0[i] = np.sum((mp_DXnr[i] - mp_Y0[i]) ** 2)
    mp_ry1[i] = mp_alpha ** 2 * np.sum((mp_Xnr[mp_grp[i]:mp_grp[i + 1]] - mp_Y1[mp_grp[i]:mp_grp[i + 1]]) ** 2)
    # Dual residual components from the change in Y0/Y1 since last iteration.
    mp_sy0[i] = np.sum((mp_Y0old[i] - mp_Y0[i]) ** 2)
    mp_sy1[i] = mp_alpha ** 2 * np.sum((mp_Y1old[mp_grp[i]:mp_grp[i + 1]] - mp_Y1[mp_grp[i]:mp_grp[i + 1]]) ** 2)
    # Squared norms feeding the relative stopping tolerances.
    mp_nrmAx[i] = np.sum(mp_DXnr[i] ** 2) + mp_alpha ** 2 * np.sum(mp_Xnr[mp_grp[i]:mp_grp[i + 1]] ** 2)
    mp_nrmBy[i] = np.sum(mp_Y0[i] ** 2) + mp_alpha ** 2 * np.sum(mp_Y1[mp_grp[i]:mp_grp[i + 1]] ** 2)
    mp_nrmu[i] = np.sum(mp_U0[i] ** 2) + np.sum(mp_U1[mp_grp[i]:mp_grp[i + 1]] ** 2)
def partes(self, num_partes=11):
    """Split the CF-e-SAT access key digits into *num_partes* segments.

    :param int num_partes: Number of segments to split the 44 key digits
        into. **Must divide 44 evenly.** Defaults to ``11``, as commonly
        used when printing the key on the CF-e-SAT receipt.
    :return: List of strings with the partitioned key.
    :rtype: list
    """
    assert 44 % num_partes == 0, 'O numero de partes nao produz um ' 'resultado inteiro (partes por 44 digitos): ' 'num_partes=%s' % num_partes
    segment_len = 44 // num_partes
    return [self._campos[start:start + segment_len] for start in range(0, 44, segment_len)]
def makeExtensionLoginMethod(extensionKey):
    '''Return a function that will call the vim.SessionManager.Login() method
    with the given parameters. The result of this function can be passed as
    the "loginMethod" to a SessionOrientedStub constructor.'''
    def _login(soapStub):
        service_instance = vim.ServiceInstance("ServiceInstance", soapStub)
        session_manager = service_instance.content.sessionManager
        # Only authenticate when no session is currently active.
        if not session_manager.currentSession:
            service_instance.content.sessionManager.LoginExtensionByCertificate(extensionKey)
    return _login
def get_next_objective_bank(self):
    """Gets the next ObjectiveBank in this list.

    return: (osid.learning.ObjectiveBank) - the next ObjectiveBank
            in this list. The has_next() method should be used to
            test that a next ObjectiveBank is available before
            calling this method.
    raise:  IllegalState - no more elements available in this list
    raise:  OperationFailed - unable to complete request
    compliance: mandatory - This method must be implemented.
    """
    try:
        bank = next(self)
    except StopIteration:
        # Exhausted iterator maps to the OSID IllegalState contract.
        raise IllegalState('no more elements available in this list')
    except Exception:
        # Need to specify exceptions here!
        raise OperationFailed()
    return bank
def jinja_fragment_extension(tag, endtag=None, name=None, tag_only=False, allow_args=True, callblock_args=None):
    """Decorator to easily create a jinja extension which acts as a fragment.

    :param tag: name of the opening tag handled by the extension
    :param endtag: name of the closing tag; defaults to ``"end" + tag``
    :param name: name of the generated Extension class; defaults to the
        decorated function's ``__name__``
    :param tag_only: when True the tag has no body and expands directly
        to the decorated function's output
    :param allow_args: whether arguments may follow the opening tag
    :param callblock_args: parameter names made available inside the
        tag body
    :return: a decorator producing a jinja2 ``Extension`` subclass
    """
    if endtag is None:
        endtag = "end" + tag

    def decorator(f):
        def parse(self, parser):
            # NOTE(review): parser.stream.next() is the old jinja2 stream
            # API; newer jinja2 expects next(parser.stream) -- confirm
            # which jinja2 versions are supported.
            lineno = parser.stream.next().lineno
            args = []
            kwargs = []
            if allow_args:
                args, kwargs = parse_block_signature(parser)
            # Emit a runtime call to support_method with the parsed args.
            call = self.call_method("support_method", args, kwargs, lineno=lineno)
            if tag_only:
                return nodes.Output([call], lineno=lineno)
            call_args = []
            if callblock_args is not None:
                for arg in callblock_args:
                    call_args.append(nodes.Name(arg, 'param', lineno=lineno))
            # Everything up to the matching end tag becomes the body.
            body = parser.parse_statements(['name:' + endtag], drop_needle=True)
            return nodes.CallBlock(call, call_args, [], body, lineno=lineno)

        def support_method(self, *args, **kwargs):
            # Delegates to the decorated function at render time.
            return f(*args, **kwargs)

        attrs = {"tags": set([tag]), "parse": parse, "support_method": support_method}
        # Build the Extension subclass dynamically from the pieces above.
        return type(name or f.__name__, (Extension,), attrs)

    return decorator
def gray2qimage(gray, normalize=False):
    """Convert the 2D numpy array `gray` into a 8-bit, indexed QImage_
    with a gray colormap. The first dimension represents the vertical
    image axis.

    The parameter `normalize` can be used to normalize an image's
    value range to 0..255:

    `normalize` = (nmin, nmax):
      scale & clip image values from nmin..nmax to 0..255

    `normalize` = nmax:
      lets nmin default to zero, i.e. scale & clip the range 0..nmax
      to 0..255

    `normalize` = True:
      scale image values to 0..255 (same as passing (gray.min(),
      gray.max()))

    If the source array `gray` contains masked values, the result will
    have only 255 shades of gray, and one color map entry will be used
    to make the corresponding pixels transparent.

    A full alpha channel cannot be supported with indexed images;
    instead, use `array2qimage` to convert into a 32-bit QImage.

    :param gray: image data which should be converted (copied) into a QImage_
    :type gray: 2D or 3D numpy.ndarray_ or `numpy.ma.array <masked arrays>`_
    :param normalize: normalization parameter (see above, default: no value changing)
    :type normalize: bool, scalar, or pair
    :rtype: QImage_ with RGB32 or ARGB32 format
    """
    if _np.ndim(gray) != 2:
        # BUGFIX: the previous one-liner parsed as
        # ("base" + "hint") if ndim == 3 else "", so any non-2D, non-3D
        # input raised ValueError with an empty message. Build the
        # message explicitly so the base text is always present.
        msg = "gray2QImage can only convert 2D arrays"
        if _np.ndim(gray) == 3:
            msg += " (try using array2qimage)"
        raise ValueError(msg)
    h, w = gray.shape
    result = _qt.QImage(w, h, _qt.QImage.Format_Indexed8)
    if not _np.ma.is_masked(gray):
        # Plain array: identity gray colormap over all 256 entries.
        for i in range(256):
            result.setColor(i, _qt.qRgb(i, i, i))
        _qimageview(result)[:] = _normalize255(gray, normalize)
    else:
        # map gray value 1 to gray value 0, in order to make room for
        # transparent colormap entry:
        result.setColor(0, _qt.qRgb(0, 0, 0))
        for i in range(2, 256):
            result.setColor(i - 1, _qt.qRgb(i, i, i))
        _qimageview(result)[:] = _normalize255(gray, normalize, clip=(1, 255)) - 1
        # Entry 255 is fully transparent and assigned to all masked pixels.
        result.setColor(255, 0)
        _qimageview(result)[gray.mask] = 255
    return result
def add_cors_headers(request, response):
    """Add cors headers needed for web app implementation."""
    cors_headers = [
        ('Access-Control-Allow-Origin', '*'),
        ('Access-Control-Allow-Methods', 'GET, OPTIONS'),
        ('Access-Control-Allow-Headers', ','.join(DEFAULT_ACCESS_CONTROL_ALLOW_HEADERS)),
    ]
    # Append each header pair to the response's raw header list.
    for header_name, header_value in cors_headers:
        response.headerlist.append((header_name, header_value))
def get_alerts_since(self, timestamp):
    """Returns all the `Alert` objects of this `Trigger` that were fired since the specified timestamp.

    :param timestamp: time object representing the point in time since when alerts have to be fetched
    :type timestamp: int, ``datetime.datetime`` or ISO8601-formatted string
    :return: list of `Alert` instances
    """
    # Normalize the heterogeneous timestamp input to UNIX seconds once,
    # then filter the alerts against that cutoff.
    cutoff = timeformatutils.to_UNIXtime(timestamp)
    return [alert for alert in self.alerts if alert.last_update >= cutoff]
def log_to_parquet(bro_log, parquet_file, compression='SNAPPY', row_group_size=1000000):
    """write_to_parquet: Converts a Bro log into a Parquet file

    Args:
        bro_log (string): The full path to the bro log to be saved as a Parquet file
        parquet_file (string): The full path to the filename for the Parquet file
        compression (string): The compression algo to use (defaults to 'SNAPPY')
        row_group_size (int): The size of the parquet row groups (defaults to 100000)

    Notes:
        Right now there are two open Parquet issues
        - Timestamps in Spark: https://issues.apache.org/jira/browse/ARROW-1499
        - TimeDelta Support: https://issues.apache.org/jira/browse/ARROW-835
    """
    # Set up various parameters
    current_row_set = []
    writer = None
    num_rows = 0
    # Spin up the bro reader on a given log file
    reader = BroLogReader(bro_log)
    for num_rows, row in enumerate(reader.readrows()):
        # Append the row to the row set
        current_row_set.append(row)
        # If we have enough rows add to the Parquet table
        # NOTE(review): this condition also fires on the very first row
        # (num_rows == 0), so the first batch contains a single row --
        # confirm whether that is intended.
        if num_rows % row_group_size == 0:
            print('Writing {:d} rows...'.format(num_rows))
            if writer is None:
                # First batch: infer the Arrow schema and open the writer.
                arrow_table = pa.Table.from_pandas(_make_df(current_row_set))
                writer = pq.ParquetWriter(parquet_file, arrow_table.schema, compression=compression, use_deprecated_int96_timestamps=True)
                writer.write_table(arrow_table)
            else:
                arrow_table = pa.Table.from_pandas(_make_df(current_row_set))
                writer.write_table(arrow_table)
            # Empty the current row set
            current_row_set = []
    # Add any left over rows and close the Parquet file
    # NOTE(review): if the last iteration just flushed, current_row_set is
    # empty here and an empty table is still written -- confirm intent.
    if num_rows:
        print('Writing {:d} rows...'.format(num_rows))
        arrow_table = pa.Table.from_pandas(_make_df(current_row_set))
        writer.write_table(arrow_table)
        writer.close()
    print('Parquet File Complete')
def build_query_uri(self, start=0, count=-1, filter='', query='', sort='', view='', fields='', uri=None, scope_uris=''):
    """Builds the URI given the parameters.

    More than one request can be send to get the items, regardless the query parameter 'count', because the actual
    number of items in the response might differ from the requested count. Some types of resource have a limited
    number of items returned on each call. For those resources, additional calls are made to the API to retrieve
    any other items matching the given filter. The actual number of items can also differ from the requested call
    if the requested number of items would take too long.

    The use of optional parameters for OneView 2.0 is described at:
    http://h17007.www1.hpe.com/docs/enterprise/servers/oneview2.0/cic-api/en/api-docs/current/index.html

    Note:
        Single quote - "'" - inside a query parameter is not supported by OneView API.

    Args:
        start: The first item to return, using 0-based indexing.
            If not specified, the default is 0 - start with the first available item.
        count: The number of resources to return. A count of -1 requests all items (default).
        filter (list or str): A general filter/query string to narrow the list of items returned. The default is no
            filter; all resources are returned.
        query: A single query parameter can do what would take multiple parameters or multiple GET requests using
            filter. Use query for more complex queries. NOTE: This parameter is experimental for OneView 2.0.
        sort: The sort order of the returned data set. By default, the sort order is based on create time with the
            oldest entry first.
        view: Returns a specific subset of the attributes of the resource or collection by specifying the name of a
            predefined view. The default view is expand (show all attributes of the resource and all elements of
            the collections or resources).
        fields: Name of the fields.
        uri: A specific URI (optional)
        scope_uris: An expression to restrict the resources returned according to the scopes to
            which they are assigned.

    Returns:
        uri: The complete uri
    """
    # Each optional argument is rewritten in-place into its query-string
    # fragment (or left as '' when unused) and concatenated at the end.
    if filter:
        filter = self.__make_query_filter(filter)
    if query:
        query = "&query=" + quote(query)
    if sort:
        sort = "&sort=" + quote(sort)
    if view:
        view = "&view=" + quote(view)
    if fields:
        fields = "&fields=" + quote(fields)
    if scope_uris:
        scope_uris = "&scopeUris=" + quote(scope_uris)
    # Use the explicit uri when given, otherwise the resource's own uri.
    path = uri if uri else self._uri
    self.__validate_resource_uri(path)
    # Start a query string with '?' unless the path already has one.
    symbol = '?' if '?' not in path else '&'
    uri = "{0}{1}start={2}&count={3}{4}{5}{6}{7}{8}{9}".format(path, symbol, start, count, filter, query, sort, view, fields, scope_uris)
    return uri
def read_file(filepath):
    """read the file

    :param filepath: path of the file to read
    :return: the file's entire contents as a string
    """
    # The context manager guarantees the handle is closed on exit.
    with io.open(filepath, "r") as filepointer:
        return filepointer.read()
def from_sky(cls, magnitudelimit=None):
    '''Create a Constellation from a criteria search of the whole sky.

    Parameters
    ----------
    magnitudelimit : float
        Maximum magnitude (for Ve="estimated V").
    '''
    # define a query for cone search surrounding this center
    criteria = {}
    if magnitudelimit is not None:
        # Vizier column-filter syntax: keep rows with magnitude below the limit.
        criteria[cls.defaultfilter + 'mag'] = '<{}'.format(magnitudelimit)
    v = Vizier(columns=cls.columns, column_filters=criteria)
    # ROW_LIMIT = -1 disables astroquery's default row cap.
    v.ROW_LIMIT = -1
    # run the query
    print('querying Vizier for {}, for {}<{}'.format(cls.name, cls.defaultfilter, magnitudelimit))
    # query_constraints returns a TableList; the catalog's table is first.
    table = v.query_constraints(catalog=cls.catalog, **criteria)[0]
    # store the search parameters in this object
    c = cls(cls.standardize_table(table))
    c.standardized.meta['catalog'] = cls.catalog
    c.standardized.meta['criteria'] = criteria
    c.standardized.meta['magnitudelimit'] = magnitudelimit or c.magnitudelimit
    # c.magnitudelimit = magnitudelimit or c.magnitudelimit
    return c
def leehom_general_stats_table(self):
    """Take the parsed stats from the leeHom report and add it to the
    basic stats table at the top of the report"""
    def _read_count_col(title_text, description_text):
        # Shared column template: scaled read counts on a PuRd scale.
        return {
            'title': '{} {}'.format(config.read_count_prefix, title_text),
            'description': '{} ({})'.format(description_text, config.read_count_desc),
            'min': 0,
            'scale': 'PuRd',
            'modify': lambda x: x * config.read_count_multiplier,
            'shared_key': 'read_count',
        }

    headers = {
        'merged_trimming': _read_count_col('Merged (Trimming)', 'Merged clusters from trimming'),
        'merged_overlap': _read_count_col('Merged (Overlap)', 'Merged clusters from overlapping reads'),
    }
    self.general_stats_addcols(self.leehom_data, headers)
def authenticate(self, provider):
    """Starts OAuth authorization flow, will redirect to 3rd party site."""
    redirect_target = url_for(".callback", provider=provider, _external=True)
    oauth_handler = self.get_provider(provider)
    # Remember where to send the user once the OAuth round-trip completes.
    session['next'] = request.args.get('next') or ''
    return oauth_handler.authorize(redirect_target)
def eigenvector_centrality_und(CIJ):
    '''Eigenector centrality is a self-referential measure of centrality:
    nodes have high eigenvector centrality if they connect to other nodes
    that have high eigenvector centrality. The eigenvector centrality of
    node i is equivalent to the ith element in the eigenvector
    corresponding to the largest eigenvalue of the adjacency matrix.

    Parameters
    ----------
    CIJ : NxN np.ndarray
        binary/weighted undirected adjacency matrix

    Returns
    -------
    v : Nx1 np.ndarray
        eigenvector associated with the largest eigenvalue of the matrix
    '''
    from scipy import linalg
    # The input is an undirected (symmetric) adjacency matrix, so use the
    # symmetric solver: eigh guarantees real eigenvalues, avoiding the
    # previous np.argmax over potentially complex values returned by
    # linalg.eig (complex ordering is not meaningful), and is faster.
    vals, vecs = linalg.eigh(CIJ)
    # eigh returns eigenvalues in ascending order, so the principal
    # eigenvector is the last column; abs() fixes the arbitrary sign.
    return np.abs(vecs[:, -1])
def xorsum(t):
    """XOR checksum.

    XORs every value in *t* together and masks the result to a single
    byte.

    :param t: iterable of ints (e.g. a ``bytes`` object or list of byte values)
    :return: XOR of all elements masked to 0-255; 0 for empty input
    :rtype: int
    """
    # Seeding with 0 generalizes the original, which indexed t[0] and
    # raised IndexError on empty input, and also skipped the 0xff mask
    # entirely for single-element sequences. XOR commutes with the mask,
    # so masking once at the end is equivalent to masking every step.
    checksum = 0
    for value in t:
        checksum ^= value
    return checksum & 0xff
def put(self, path, value):
    """Insert or update a configuration key at *path* with *value*.

    Thin delegate to the bound ``zconfig_put``; ``self._as_parameter_`` is
    presumably the underlying zconfig handle (ctypes ``_as_parameter_``
    convention) — confirm against the binding's class definition.
    """
    return lib.zconfig_put(self._as_parameter_, path, value)
def valid_string(val):
    """Expects unicode.

    Validates every character against the XML Char production:
    Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] |
             [#x10000-#x10FFFF]

    Raises NotValid("string") on the first offending character, otherwise
    returns True.
    """
    for ch in val:
        try:
            code = ord(ch)
        except TypeError:
            # Non-character element (e.g. bytes int semantics mismatch).
            raise NotValid("string")
        allowed = (
            code in (0x09, 0x0A, 0x0D)
            or 0x20 <= code <= 0xD7FF
            or 0xE000 <= code <= 0xFFFD
            or 0x10000 <= code <= 0x10FFFF
        )
        if not allowed:
            raise NotValid("string")
    return True
def mean_oob_mae_weight(trees):
    """Returns weights proportional to the out-of-bag mean absolute error for each tree.

    Trees without an out-of-bag MAE (or without a mean for it) are skipped.
    Returns None when no usable tree remains; otherwise a zip of
    (normalized weight, tree) pairs.
    """
    maes = []
    usable = []
    for candidate in trees:
        oob = candidate.out_of_bag_mae
        # Skip trees that never collected out-of-bag statistics.
        if oob is None or oob.mean is None:
            continue
        maes.append(oob.mean)
        usable.append(candidate)
    if not usable:
        return
    return zip(normalize(maes), usable)
def search_references(self, reference_set_id, accession=None, md5checksum=None):
    """Returns an iterator over the References fulfilling the specified
    conditions from the specified Dataset.

    :param str reference_set_id: The ReferenceSet to search.
    :param str accession: If not None, return the references for which the
        `accession` matches this string (case-sensitive, exact match).
    :param str md5checksum: If not None, return the references for which
        the `md5checksum` matches this string (case-sensitive, exact match).
    :return: An iterator over the :class:`ga4gh.protocol.Reference`
        objects defined by the query parameters.
    """
    req = protocol.SearchReferencesRequest()
    req.reference_set_id = reference_set_id
    # pb.string()/pb.int() coerce None into protobuf-friendly defaults.
    req.accession = pb.string(accession)
    req.md5checksum = pb.string(md5checksum)
    req.page_size = pb.int(self._page_size)
    return self._run_search_request(
        req, "references", protocol.SearchReferencesResponse)
def post_versions_undo(self, version_id):
    """Undo post version (Requires login) (UNTESTED).

    Parameters:
        version_id (int):
    """
    endpoint = 'post_versions/{0}/undo.json'.format(version_id)
    return self._get(endpoint, method='PUT', auth=True)
def resize(self, size, interp='nearest'):
    """Resize the image.

    Parameters
    ----------
    size : int, float, or tuple
        * int   - Percentage of current size.
        * float - Fraction of current size.
        * tuple - Size of the output image.
    interp : :obj:`str`, optional
        Interpolation to use for re-sizing ('nearest', 'lanczos', 'bilinear',
        'bicubic', or 'cubic')

    Returns
    -------
    SegmentationImage
        A new image wrapping the resized data in the same frame.
    """
    # NOTE(review): scipy.misc.imresize was deprecated in SciPy 1.0 and
    # removed in 1.3; this call fails on modern SciPy. Confirm the pinned
    # scipy version, or migrate to PIL/skimage resizing.
    # mode='L' keeps the label data single-channel 8-bit.
    resized_data = sm.imresize(self.data, size, interp=interp, mode='L')
    return SegmentationImage(resized_data, self._frame)
def url_escape(value, plus=True):
    """Returns a URL-encoded version of the given value.

    If ``plus`` is true (the default), spaces will be represented
    as "+" instead of "%20". This is appropriate for query strings
    but not for the path component of a URL. Note that this default
    is the reverse of Python's urllib module.

    .. versionadded:: 3.1
       The ``plus`` argument
    """
    encoded = utf8(value)
    if plus:
        return urllib_parse.quote_plus(encoded)
    return urllib_parse.quote(encoded)
def password_attributes_max_lockout_duration(self, **kwargs):
    """Auto Generated Code.

    Builds the NETCONF <config> payload setting
    password-attributes/max-lockout-duration and hands it to the callback
    (default: self._callback).
    """
    config = ET.Element("config")
    attributes = ET.SubElement(
        config, "password-attributes",
        xmlns="urn:brocade.com:mgmt:brocade-aaa")
    duration = ET.SubElement(attributes, "max-lockout-duration")
    duration.text = kwargs.pop('max_lockout_duration')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def pre_save(self, *args, **kwargs):
    "Returns field's value just before saving."
    # Let the parent CharField resolve the raw attribute value, then run it
    # through get_prep_value so the stored form matches query-time coercion.
    value = super(CharField, self).pre_save(*args, **kwargs)
    return self.get_prep_value(value)
def generate(command, name, init_system, overwrite, deploy, start, verbose, **params):
    """Create a service.

    `COMMAND` is the path to the executable to run
    """
    # TODO: Add a `prefix` flag which can be used to prefix
    # `COMMAND` with `su -c`, etc..
    try:
        # Construction stays inside the try so init errors are also
        # reported through sys.exit.
        service = Serv(init_system, verbose=verbose)
        service.generate(command, name, overwrite, deploy, start, **params)
    except ServError as ex:
        # Exit with the error message as the status.
        sys.exit(ex)
def reduce(self, start=0, end=None):
    """Returns result of applying `self.operation`
    to a contiguous subsequence of the array.

    self.operation(
        arr[start], operation(arr[start+1], operation(... arr[end])))

    Parameters
    ----------
    start : int
        beginning of the subsequence
    end : int
        end of the subsequence (None means the last element; negative
        values wrap like sequence indices)

    Returns
    -------
    reduced : obj
        result of reducing self.operation over the specified range of
        array elements.
    """
    last = self._capacity - 1
    if end is None:
        end = last
    if end < 0:
        # Support negative indexing from the end of the array.
        end += self._capacity
    # Node 1 is the segment tree root covering [0, capacity - 1].
    return self._reduce_helper(start, end, 1, 0, last)
def get(self, *, kind: Type = None, tag: Hashable = None, **_) -> Iterator:
    """Get an iterator of objects by kind or tag.

    kind: Any type. Pass to get a subset of contained items with the given
        type.
    tag: Any Hashable object. Pass to get a subset of contained items with
        the given tag.

    Pass both kind and tag to get objects that are both that type and that
    tag.

    Examples:
        container.get(type=MyObject)
        container.get(tag="red")
        container.get(type=MyObject, tag="red")
    """
    if kind is None and tag is None:
        raise TypeError("get() takes at least one keyword-only argument. 'kind' or 'tag'.")
    # Default either filter to the full set when it was not requested.
    by_kind = self.kinds[kind] if kind is not None else self.all
    by_tag = self.tags[tag] if tag is not None else self.all
    return (item for item in by_kind.intersection(by_tag))
def _BernII_to_Flavio_II(C, udlnu, parameters):
    """From BernII to FlavioII basis
    for charged current process semileptonic operators.
    `udlnu` should be of the form 'udl_enu_tau', 'cbl_munu_e' etc.

    :param C: dict of Wilson coefficients in the Bern basis.
    :param udlnu: process label encoding the up-quark, down-quark, lepton
        and neutrino flavours (see format above).
    :param parameters: dict of numerical inputs (CKM parameters and 'GF').
    :return: dict of Wilson coefficients in the flavio basis.
    """
    p = parameters
    # Decode flavour indices out of the process string, e.g. 'cbl_munu_e':
    # char 0 is the up-type quark, char 1 the down-type quark.
    u = uflav[udlnu[0]]
    d = dflav[udlnu[1]]
    # Lepton flavour: characters between 'l_' and the 'n' of 'nu'.
    l = lflav[udlnu[4:udlnu.find('n')]]
    # Neutrino flavour: everything after the second underscore.
    lp = lflav[udlnu[udlnu.find('_', 5) + 1:len(udlnu)]]
    # Index strings addressing the Bern-basis (ind) and flavio-basis
    # (ind2, with quark order swapped and an explicit 'nu') names.
    ind = udlnu[0] + udlnu[1] + udlnu[4:udlnu.find('n')] + udlnu[udlnu.find('_', 5) + 1:len(udlnu)]
    ind2 = udlnu[1] + udlnu[0] + udlnu[4:udlnu.find('n')] + 'nu' + udlnu[udlnu.find('_', 5) + 1:len(udlnu)]
    # Map Bern operators {1, 1', 5, 5', 7'} onto flavio {VL, VR, SR, SL, T}.
    dic = {
        'CVL_' + ind2: C['1' + ind],
        'CVR_' + ind2: C['1p' + ind],
        'CSR_' + ind2: C['5' + ind],
        'CSL_' + ind2: C['5p' + ind],
        'CT_' + ind2: C['7p' + ind],
    }
    # Tree-level CKM matrix from the given inputs.
    V = ckmutil.ckm.ckm_tree(p["Vus"], p["Vub"], p["Vcb"], p["delta"])
    # Overall normalisation relative to the Fermi-constant/CKM prefactor.
    prefactor = -sqrt(2) / p['GF'] / V[u, d] / 4
    return {k: prefactor * v for k, v in dic.items()}
def __fieldNorm(self, fieldName):
    """Normalizes a dbf field name to fit within the spec and the
    expectations of certain ESRI software.

    :param fieldName: proposed field name.
    :return: the name truncated to 11 characters, upper-cased, with
        spaces replaced by underscores.
    """
    # dbf field names are limited to 11 characters.
    if len(fieldName) > 11:
        fieldName = fieldName[:11]
    fieldName = fieldName.upper()
    # Bug fix: str.replace returns a new string -- the original discarded
    # the result and fell off the end, returning None. Return it instead.
    return fieldName.replace(' ', '_')
def fix(self):
    """Return a version of the source code with PEP 8 violations fixed.

    Runs the pycodestyle checker over ``self.source``, optionally restricts
    the reported results to ``options.line_range``, applies the fixes
    in-place on ``self.source``, and returns the joined result.
    """
    pep8_options = {
        'ignore': self.options.ignore,
        'select': self.options.select,
        'max_line_length': self.options.max_line_length,
        'hang_closing': self.options.hang_closing,
    }
    results = _execute_pep8(pep8_options, self.source)
    if self.options.verbose:
        # Group the offending line numbers by issue id for the summary.
        progress = {}
        for r in results:
            if r['id'] not in progress:
                progress[r['id']] = set()
            progress[r['id']].add(r['line'])
        print('---> {n} issue(s) to fix {progress}'.format(
            n=len(results), progress=progress), file=sys.stderr)
    if self.options.line_range:
        start, end = self.options.line_range
        # Only fix issues inside the requested (inclusive) line range.
        results = [r for r in results if start <= r['line'] <= end]
    self._fix_source(filter_results(source=''.join(self.source),
                                    results=results,
                                    aggressive=self.options.aggressive))
    if self.options.line_range:
        # If number of lines has changed then change line_range.
        # `start`/`end` are still bound from the identical condition above.
        count = sum(sline.count('\n') for sline in self.source[start - 1:end])
        self.options.line_range[1] = start + count - 1
    return ''.join(self.source)
def job_success_message(self, job, queue, job_result):
    """Return the message to log when a job is successful."""
    # Log line carries queue name, job primary key and identifier.
    context = (queue._cached_name, job.pk.get(), job._cached_identifier, job.duration)
    return '[%s|%s|%s] success, in %s' % context
def _send_cmd(self, cmd: str):
    """Encode IQFeed API messages."""
    # IQFeed's wire protocol is latin-1; strict errors surface bad input.
    payload = cmd.encode(encoding='latin-1', errors='strict')
    self._sock.sendall(payload)
def parse_bss(bss):
    """Parse data prepared by nla_parse() and nla_parse_nested() into Python-friendly formats.

    Automatically chooses the right data-type for each attribute and converts it into Python
    integers, strings, unicode, etc objects.

    Positional arguments:
    bss -- dictionary with integer keys and nlattr values.

    Returns:
    New dictionary with the same integer keys and converted values. Excludes null/empty data from `bss`.
    """
    # First parse data into Python data types. Weed out empty values.
    intermediate = dict()
    _get(intermediate, bss, 'NL80211_BSS_BSSID', libnl.attr.nla_data)
    # MAC address of access point.
    _get(intermediate, bss, 'NL80211_BSS_FREQUENCY', libnl.attr.nla_get_u32)
    # Frequency in MHz.
    _get(intermediate, bss, 'NL80211_BSS_TSF', libnl.attr.nla_get_msecs)
    # Timing Synchronization Function.
    _get(intermediate, bss, 'NL80211_BSS_BEACON_INTERVAL', libnl.attr.nla_get_u16)
    _get(intermediate, bss, 'NL80211_BSS_CAPABILITY', libnl.attr.nla_get_u16)
    _get(intermediate, bss, 'NL80211_BSS_INFORMATION_ELEMENTS', libnl.attr.nla_data)
    _get(intermediate, bss, 'NL80211_BSS_SIGNAL_MBM', libnl.attr.nla_get_u32)
    _get(intermediate, bss, 'NL80211_BSS_SIGNAL_UNSPEC', libnl.attr.nla_get_u8)
    _get(intermediate, bss, 'NL80211_BSS_STATUS', libnl.attr.nla_get_u32)
    _get(intermediate, bss, 'NL80211_BSS_SEEN_MS_AGO', libnl.attr.nla_get_u32)
    _get(intermediate, bss, 'NL80211_BSS_BEACON_IES', libnl.attr.nla_data)
    # Parse easy data into final Python types.
    parsed = dict()
    if 'bssid' in intermediate:
        # First six bytes rendered as a colon-separated hex MAC string.
        parsed['bssid'] = ':'.join(format(x, '02x') for x in intermediate['bssid'][:6])
    if 'frequency' in intermediate:
        parsed['frequency'] = intermediate['frequency']
    if 'tsf' in intermediate:
        parsed['tsf'] = timedelta(microseconds=intermediate['tsf'])
    if 'beacon_interval' in intermediate:
        parsed['beacon_interval'] = intermediate['beacon_interval']
    if 'signal_mbm' in intermediate:
        # Reinterpret the unsigned 32-bit value as signed, then mBm -> dBm.
        data_u32 = intermediate['signal_mbm']
        data_s32 = -(data_u32 & 0x80000000) + (data_u32 & 0x7fffffff)
        parsed['signal_mbm'] = data_s32 / 100.0
    if 'signal_unspec' in intermediate:
        parsed['signal_unspec'] = intermediate['signal_unspec'] / 100.0
    if 'seen_ms_ago' in intermediate:
        parsed['seen_ms_ago'] = timedelta(milliseconds=intermediate['seen_ms_ago'])
    # Handle status.
    if intermediate.get('status') == nl80211.NL80211_BSS_STATUS_AUTHENTICATED:
        parsed['status'] = 'authenticated'
    elif intermediate.get('status') == nl80211.NL80211_BSS_STATUS_ASSOCIATED:
        parsed['status'] = 'associated'
    elif intermediate.get('status') == nl80211.NL80211_BSS_STATUS_IBSS_JOINED:
        parsed['status'] = 'joined'
    elif 'status' in intermediate:
        parsed['status'] = 'unknown status: {0}'.format(intermediate['status'])
    # Handle capability.
    if 'capability' in intermediate:  # http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n1479
        data = intermediate['capability']
        list_of_caps = list()
        # NOTE(review): indexes parsed['frequency'] unconditionally; if the
        # kernel omitted NL80211_BSS_FREQUENCY this raises KeyError --
        # confirm whether capability without frequency can occur.
        if parsed['frequency'] > 45000:
            # > 45 GHz: DMG (802.11ad / 60 GHz) capability bit layout.
            if data & iw_scan.WLAN_CAPABILITY_DMG_TYPE_MASK == iw_scan.WLAN_CAPABILITY_DMG_TYPE_AP:
                list_of_caps.append('DMG_ESS')
            elif data & iw_scan.WLAN_CAPABILITY_DMG_TYPE_MASK == iw_scan.WLAN_CAPABILITY_DMG_TYPE_PBSS:
                list_of_caps.append('DMG_PCP')
            elif data & iw_scan.WLAN_CAPABILITY_DMG_TYPE_MASK == iw_scan.WLAN_CAPABILITY_DMG_TYPE_IBSS:
                list_of_caps.append('DMG_IBSS')
            if data & iw_scan.WLAN_CAPABILITY_DMG_CBAP_ONLY:
                list_of_caps.append('CBAP_Only')
            if data & iw_scan.WLAN_CAPABILITY_DMG_CBAP_SOURCE:
                list_of_caps.append('CBAP_Src')
            if data & iw_scan.WLAN_CAPABILITY_DMG_PRIVACY:
                list_of_caps.append('Privacy')
            if data & iw_scan.WLAN_CAPABILITY_DMG_ECPAC:
                list_of_caps.append('ECPAC')
            if data & iw_scan.WLAN_CAPABILITY_DMG_SPECTRUM_MGMT:
                list_of_caps.append('SpectrumMgmt')
            if data & iw_scan.WLAN_CAPABILITY_DMG_RADIO_MEASURE:
                list_of_caps.append('RadioMeasure')
        else:
            # Regular 2.4/5 GHz capability bit layout.
            if data & iw_scan.WLAN_CAPABILITY_ESS:
                list_of_caps.append('ESS')
            if data & iw_scan.WLAN_CAPABILITY_IBSS:
                list_of_caps.append('IBSS')
            if data & iw_scan.WLAN_CAPABILITY_CF_POLLABLE:
                list_of_caps.append('CfPollable')
            if data & iw_scan.WLAN_CAPABILITY_CF_POLL_REQUEST:
                list_of_caps.append('CfPollReq')
            if data & iw_scan.WLAN_CAPABILITY_PRIVACY:
                list_of_caps.append('Privacy')
            if data & iw_scan.WLAN_CAPABILITY_SHORT_PREAMBLE:
                list_of_caps.append('ShortPreamble')
            if data & iw_scan.WLAN_CAPABILITY_PBCC:
                list_of_caps.append('PBCC')
            if data & iw_scan.WLAN_CAPABILITY_CHANNEL_AGILITY:
                list_of_caps.append('ChannelAgility')
            if data & iw_scan.WLAN_CAPABILITY_SPECTRUM_MGMT:
                list_of_caps.append('SpectrumMgmt')
            if data & iw_scan.WLAN_CAPABILITY_QOS:
                list_of_caps.append('QoS')
            if data & iw_scan.WLAN_CAPABILITY_SHORT_SLOT_TIME:
                list_of_caps.append('ShortSlotTime')
            if data & iw_scan.WLAN_CAPABILITY_APSD:
                list_of_caps.append('APSD')
            if data & iw_scan.WLAN_CAPABILITY_RADIO_MEASURE:
                list_of_caps.append('RadioMeasure')
            if data & iw_scan.WLAN_CAPABILITY_DSSS_OFDM:
                list_of_caps.append('DSSS-OFDM')
            if data & iw_scan.WLAN_CAPABILITY_DEL_BACK:
                list_of_caps.append('DelayedBACK')
            if data & iw_scan.WLAN_CAPABILITY_IMM_BACK:
                list_of_caps.append('ImmediateBACK')
        parsed['capability'] = list_of_caps
    # Handle (beacon) information elements.
    for k in ('information_elements', 'beacon_ies'):
        if k not in intermediate:
            continue
        parsed[k] = iw_scan.get_ies(intermediate[k])
    # Make some data more human-readable.
    parsed['signal'] = parsed.get('signal_mbm', parsed.get('signal_unspec'))
    parsed['channel'] = _fetch(parsed, 'DS Parameter set')
    parsed['ssid'] = _fetch(parsed, 'SSID') or _fetch(parsed, 'MESH ID') or ''
    parsed['supported_rates'] = _fetch(parsed, 'Supported rates')
    parsed['extended_supported_rates'] = _fetch(parsed, 'Extended supported rates')
    parsed['channel_width'] = _fetch(parsed, 'HT operation', 'STA channel width')
    return parsed
def pclass_field_for_attribute(self):
    """:return: A pyrsistent field reflecting this attribute and its type model."""
    field_factory = self.type_model.pclass_field_for_type
    return field_factory(required=self.required, default=self.default)
def next_frame_sv2p_tiny():
    """Tiny SV2P model."""
    hparams = next_frame_sv2p_atari_softmax()
    # Shrink the base Atari-softmax configuration down to a tiny variant.
    overrides = {
        'batch_size': 2,
        'tiny_mode': True,
        'num_masks': 1,
        'video_modality_loss_cutoff': 0.4,
        'video_num_input_frames': 4,
        'video_num_target_frames': 4,
    }
    for name, value in overrides.items():
        setattr(hparams, name, value)
    return hparams
def declare_consumer(self, queue, no_ack, callback, consumer_tag, nowait=False):
    """Declare a consumer."""
    @functools.wraps(callback)
    def _decode(channel, method, header, body):
        # Re-pack pika's four positional arguments into the single tuple
        # the user callback expects.
        return callback((channel, method, header, body))
    return self.channel.basic_consume(
        _decode, queue=queue, no_ack=no_ack, consumer_tag=consumer_tag)
def simulated_quantize(x, num_bits, noise):
    """Simulate quantization to num_bits bits, with externally-stored scale.

    num_bits is the number of bits used to store each value.
    noise is a float32 Tensor containing values in [0, 1).
    Each value in noise should take different values across
    different steps, approximating a uniform distribution over [0, 1).
    In the case of replicated TPU training, noise should be identical
    across replicas in order to keep the parameters identical across replicas.

    The natural choice for noise would be tf.random_uniform(),
    but this is not possible for TPU, since there is currently no way to seed
    the different cores to produce identical values across replicas. Instead we
    use noise_from_step_num() (see below).

    The quantization scheme is as follows:

    Compute the maximum absolute value by row (call this max_abs).
    Store this either in an auxiliary variable or in an extra column.
    Divide the parameters by (max_abs / (2^(num_bits-1)-1)). This gives a
    float32 value in the range [-2^(num_bits-1)-1, 2^(num_bits-1)-1]
    Unbiased randomized roundoff by adding noise and rounding down.
    This produces a signed integer with num_bits bits which can then be stored.

    Args:
      x: a float32 Tensor
      num_bits: an integer between 1 and 22
      noise: a float Tensor broadcastable to the shape of x.
    Returns:
      a float32 Tensor
    """
    shape = x.get_shape().as_list()
    # Only quantize tensors with at least 2 dims and a non-trivial last dim;
    # everything else is returned unchanged.
    if not (len(shape) >= 2 and shape[-1] > 1):
        return x
    # Per-row maximum magnitude; epsilon guards against division by zero.
    max_abs = tf.reduce_max(tf.abs(x), -1, keepdims=True) + 1e-9
    max_int = 2 ** (num_bits - 1) - 1
    scale = max_abs / max_int
    x /= scale
    # Stochastic (unbiased) rounding: add noise in [0, 1) and floor.
    x = tf.floor(x + noise)
    # dequantize before storing (since this is a simulation)
    x *= scale
    return x
def yubiotp(ctx, slot, public_id, private_id, key, no_enter, force, serial_public_id, generate_private_id, generate_key):
    """Program a Yubico OTP credential.

    Each of the three secrets (public ID, private ID, AES key) can be given
    explicitly, generated/derived, or prompted for interactively. With
    --force, missing values are hard errors instead of prompts.
    """
    dev = ctx.obj['dev']
    controller = ctx.obj['controller']
    # Reject mutually exclusive option pairs up front.
    if public_id and serial_public_id:
        ctx.fail('Invalid options: --public-id conflicts with ' '--serial-public-id.')
    if private_id and generate_private_id:
        # NOTE(review): the message says --generate-public-id but the
        # conflicting flag is --generate-private-id -- likely a typo.
        ctx.fail('Invalid options: --private-id conflicts with ' '--generate-public-id.')
    if key and generate_key:
        ctx.fail('Invalid options: --key conflicts with --generate-key.')
    # Resolve the public ID: derive from the device serial, fail under
    # --force, or prompt interactively.
    if not public_id:
        if serial_public_id:
            if dev.serial is None:
                ctx.fail('Serial number not set, public ID must be provided')
            # 0xff00 prefix + big-endian serial, rendered as modhex.
            public_id = modhex_encode(b'\xff\x00' + struct.pack(b'>I', dev.serial))
            click.echo('Using YubiKey serial as public ID: {}'.format(public_id))
        elif force:
            ctx.fail('Public ID not given. Please remove the --force flag, or ' 'add the --serial-public-id flag or --public-id option.')
        else:
            public_id = click.prompt('Enter public ID', err=True)
    try:
        public_id = modhex_decode(public_id)
    except KeyError:
        ctx.fail('Invalid public ID, must be modhex.')
    # Resolve the 6-byte private ID: random, fail under --force, or prompt.
    if not private_id:
        if generate_private_id:
            private_id = os.urandom(6)
            click.echo('Using a randomly generated private ID: {}'.format(b2a_hex(private_id).decode('ascii')))
        elif force:
            ctx.fail('Private ID not given. Please remove the --force flag, or ' 'add the --generate-private-id flag or --private-id option.')
        else:
            private_id = click.prompt('Enter private ID', err=True)
            private_id = a2b_hex(private_id)
    # Resolve the 16-byte AES key the same way.
    if not key:
        if generate_key:
            key = os.urandom(16)
            click.echo('Using a randomly generated secret key: {}'.format(b2a_hex(key).decode('ascii')))
        elif force:
            ctx.fail('Secret key not given. Please remove the --force flag, or ' 'add the --generate-key flag or --key option.')
        else:
            key = click.prompt('Enter secret key', err=True)
            key = a2b_hex(key)
    # Confirm interactively unless --force was given (abort=True exits).
    force or click.confirm('Program an OTP credential in slot {}?'.format(slot), abort=True, err=True)
    try:
        controller.program_otp(slot, key, public_id, private_id, not no_enter)
    except YkpersError as e:
        _failed_to_write_msg(ctx, e)
def weights_multi_problem(labels, taskid=-1):
    """Assign weight 1.0 to only the "targets" portion of the labels.

    Weight 1.0 is assigned to all labels past the taskid.

    Args:
      labels: A Tensor of int32s.
      taskid: an int32 representing the task id for a problem.

    Returns:
      A Tensor of floats.

    Raises:
      ValueError: The Task ID must be valid.
    """
    taskid = check_nonnegative(taskid)
    # Running count along the time axis of positions at-or-after the taskid
    # token; non-zero once the task id has been seen.
    past_taskid = tf.cumsum(to_float(tf.equal(labels, taskid)), axis=1)
    # Additionally zero out the task id location
    past_taskid *= to_float(tf.not_equal(labels, taskid))
    # Zero (padding) labels must stay zero-weighted, hence the product below.
    non_taskid = to_float(labels)
    return to_float(tf.not_equal(past_taskid * non_taskid, 0))
def move_leadership(self, partition, new_leader):
    """Return a new state that is the result of changing the leadership of
    a single partition.

    :param partition: The partition index of the partition to change the
        leadership of.
    :param new_leader: The broker index of the new leader replica.
    :returns: the new (shallow-copied) state; ``self`` is left unmodified.
    """
    new_state = copy(self)
    # Update the partition replica tuple: the leader is by convention the
    # replica at index 0, so swap it with the replica becoming leader.
    source = new_state.replicas[partition][0]
    new_leader_index = self.replicas[partition].index(new_leader)
    new_state.replicas = tuple_alter(
        self.replicas,
        (partition, lambda replicas: tuple_replace(
            replicas,
            (0, replicas[new_leader_index]),
            (new_leader_index, replicas[0]),
        )),
    )
    # Record this partition as having an in-flight change.
    new_state.pending_partitions = self.pending_partitions + (partition, )
    # Update the leader count
    new_state.broker_leader_counts = tuple_alter(
        self.broker_leader_counts,
        (source, lambda leader_count: leader_count - 1),
        (new_leader, lambda leader_count: leader_count + 1),
    )
    # Update the broker leader weights
    partition_weight = self.partition_weights[partition]
    new_state.broker_leader_weights = tuple_alter(
        self.broker_leader_weights,
        (source, lambda leader_weight: leader_weight - partition_weight),
        (new_leader, lambda leader_weight: leader_weight + partition_weight),
    )
    # Update the total leader movement size
    new_state.leader_movement_count += 1
    return new_state
def reset(self):
    "Close the current failed connection and prepare for a new one"
    log.info("resetting client")
    # Preserve the rpc client across re-initialization so in-flight state
    # attached to it survives the reset.
    rpc_client = self._rpc_client
    # Re-queue the failed peer's address so it can be retried later.
    self._addrs.append(self._peer.addr)
    # Re-run __init__ on this same instance to rebuild connection state.
    self.__init__(self._addrs)
    self._rpc_client = rpc_client
    self._dispatcher.rpc_client = rpc_client
    # Weak back-reference avoids a client <-> rpc_client reference cycle.
    rpc_client._client = weakref.ref(self)
def ensure_rng(random_state=None):
    """Create a random number generator based on an optional seed.

    Args:
        random_state: ``None`` for an unseeded generator, an ``int`` (or
            numpy integer) seed, or an existing
            ``numpy.random.RandomState`` which is returned unchanged.

    Returns:
        np.random.RandomState: the resulting generator.

    Raises:
        TypeError: if ``random_state`` is none of the accepted types.
    """
    if random_state is None:
        return np.random.RandomState()
    # Also accept numpy integer scalars (e.g. np.int64 from an array) as seeds.
    if isinstance(random_state, (int, np.integer)):
        return np.random.RandomState(int(random_state))
    if isinstance(random_state, np.random.RandomState):
        return random_state
    # Raise instead of `assert`: asserts are stripped under `python -O`.
    raise TypeError(
        'random_state must be None, an int seed, or a '
        'np.random.RandomState instance; got {!r}'.format(type(random_state)))
async def request(self, method, url=None, *, path='', retries=1, connection_timeout=60, **kwargs):
    '''This is the template for all of the `http method` methods for
    the Session.

    Args:
        method (str): A http method, such as 'GET' or 'POST'.
        url (str): The url the request should be made to.
        path (str): An optional kw-arg for use in Session method calls,
            for specifying a particular path. Usually to be used in
            conjunction with the base_location/endpoint paradigm.
        retries (int): The number of attempts to try against
            connection errors.
        connection_timeout (int): Seconds allowed for establishing a
            connection before giving up.
        kwargs: Any number of the following:
            data (dict or str): Info to be processed as a
                body-bound query.
            params (dict or str): Info to be processed as a
                url-bound query.
            headers (dict): User HTTP headers to be used in the
                request.
            encoding (str): The str representation of the codec to
                process the request under.
            json (dict): A dict to be formatted as json and sent in
                the request body.
            files (dict): A dict of `filename: filepath`s to be sent
                as multipart.
            cookies (dict): A dict of `name: value` cookies to be
                passed in request.
            callback (func): A callback function to be called on
                each bytechunk of the response body.
            timeout (int or float): A numeric representation of the
                longest time to wait on a complete response once a
                request has been sent.
            max_redirects (int): The maximum number of redirects
                allowed.
            persist_cookies (True or None): Passing True
                instantiates a CookieTracker object to manage the
                return of cookies to the server under the relevant
                domains.
            auth (child of AuthBase): An object for handling auth
                construction.

    When you call something like Session.get() or asks.post(), you're
    really calling a partial method that has the 'method' argument
    pre-completed.
    '''
    timeout = kwargs.get('timeout', None)
    req_headers = kwargs.pop('headers', None)
    # Merge the session-level headers with the per-request ones; the
    # per-request headers win on conflicts.
    if self.headers is not None:
        headers = copy(self.headers)
        if req_headers is not None:
            headers.update(req_headers)
        req_headers = headers
    # Semaphore bounds the number of concurrent requests per session.
    async with self.sema:
        if url is None:
            url = self._make_url() + path
        retry = False
        sock = None
        try:
            # Pull a (possibly pooled) connection for this url.
            sock = await timeout_manager(connection_timeout, self._grab_connection, url)
            port = sock.port
            req_obj = RequestProcessor(
                self, method, url, port, headers=req_headers,
                encoding=self.encoding, sock=sock,
                persist_cookies=self._cookie_tracker, **kwargs)
            try:
                if timeout is None:
                    sock, r = await req_obj.make_request()
                else:
                    sock, r = await timeout_manager(timeout, req_obj.make_request)
            except BadHttpResponse:
                # One immediate retry on a malformed response.
                if timeout is None:
                    sock, r = await req_obj.make_request()
                else:
                    sock, r = await timeout_manager(timeout, req_obj.make_request)
            if sock is not None:
                # Honour `Connection: close`; otherwise the socket goes
                # back into the pool for reuse.
                try:
                    if r.headers['connection'].lower() == 'close':
                        sock._active = False
                        await sock.close()
                except KeyError:
                    pass
                await self.return_to_pool(sock)
        # ConnectionErrors are special. They are the only kind of exception
        # we ever want to suppress. All other exceptions are re-raised or
        # raised through another exception.
        except ConnectionError as e:
            if retries > 0:
                retry = True
                retries -= 1
            else:
                raise e
        except Exception as e:
            if sock:
                await self._handle_exception(e, sock)
            raise
        # any BaseException is considered unlawful murder, and
        # Session.cleanup should be called to tidy up sockets.
        except BaseException as e:
            if sock:
                await sock.close()
            raise e
        if retry:
            # NOTE(review): `headers` is only bound when self.headers is not
            # None; a retry with self.headers None would raise NameError --
            # confirm whether self.headers can ever be None here.
            return (await self.request(method, url, path=path, retries=retries, headers=headers, **kwargs))
        return r
def set_client(self, *args, **kwargs):
    '''Send buyer information to PagSeguro.

    If you already have information about the buyer, use this method to
    forward it to PagSeguro. This is good practice, since it saves the
    client from having to fill in the same data again on the PagSeguro
    page.

    Args:
        name (str): (optional) Client name.
        email (str): (optional) Client email.
        phone_area_code (str): (optional) Client phone area code, a
            2-digit number.
        phone_number (str): (optional) Client phone number.
        cpf (str): (optional) Buyer's CPF number.
        born_date (date): Birth date in dd/MM/yyyy format.

    Example:
        >>> from pagseguro import Payment
        >>> from pagseguro import local_settings
        >>> payment = Payment(email=local_settings.PAGSEGURO_ACCOUNT_EMAIL, token=local_settings.PAGSEGURO_TOKEN, sandbox=True)
        >>> payment.set_client(name=u'Adam Yauch', phone_area_code=11)
    '''
    self.client = {}
    # Bug fix: dict.iteritems() is Python 2 only and raises AttributeError
    # on Python 3 (this codebase uses Python 3 syntax elsewhere, e.g.
    # `async def`); use items() instead.
    for arg, value in kwargs.items():
        # Only keep truthy values; empty strings/None are dropped.
        if value:
            self.client[arg] = value
    # Validate the assembled client dict against the schema.
    client_schema(self.client)
def ExtractEvents(self, parser_mediator, registry_key, codepage='cp1252', **kwargs):
    """Extracts events from a Windows Registry key.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
      codepage (Optional[str]): extended ASCII string codepage.
    """
    # All of the work (MRUList parsing and event production) happens in the
    # shared helper; extra kwargs are accepted for interface compatibility.
    self._ParseMRUListKey(parser_mediator, registry_key, codepage=codepage)
def _init_glyph(self, plot, mapping, properties):
    """Returns a Bokeh glyph object."""
    properties.pop('legend', None)
    # Expand generic color/alpha values into their line_*/fill_* variants
    # unless the caller supplied those explicitly.
    for base in ('color', 'alpha'):
        if base not in properties:
            continue
        value = properties.pop(base)
        line_key = 'line_%s' % base
        fill_key = 'fill_%s' % base
        properties.setdefault(line_key, value)
        if fill_key in self.style_opts and fill_key not in properties:
            properties[fill_key] = value
    properties = mpl_to_bokeh(properties)
    factory = self._plot_methods['single']
    glyph = factory(**dict(properties, **mapping))
    plot.add_layout(glyph)
    return None, glyph
def value(self, raw_value):
    """Decode param as decimal value."""
    try:
        parsed = decimal.Decimal(raw_value)
    except decimal.InvalidOperation:
        # Normalize the library error into a plain ValueError for callers.
        raise ValueError("Could not parse '{}' value as decimal".format(raw_value))
    return parsed
def write_svg_debug(matrix, version, out, scale=15, border=None, fallback_color='fuchsia', color_mapping=None, add_legend=True):
    """Internal SVG serializer which is useful to debugging purposes.

    This function is not exposed to the QRCode class by intention and the
    resulting SVG document is very inefficient (lots of <rect/>s).
    Dark modules are black and light modules are white by default. Provide
    a custom `color_mapping` to override these defaults.
    Unknown modules are red by default.

    :param matrix: The matrix
    :param version: Version constant
    :param out: binary file-like object or file name
    :param scale: Scaling factor
    :param border: Quiet zone
    :param fallback_color: Color which is used for modules which are not 0x0 or 0x1
            and for which no entry in `color_mapping` is defined.
    :param color_mapping: dict of module values to color mapping (optional)
    :param bool add_legend: Indicates if the bit values should be added to the
            matrix (default: True)
    """
    # Default palette; user-supplied entries override these.
    clr_mapping = {
        0x0: '#fff',
        0x1: '#000',
        0x2: 'red',
        0x3: 'orange',
        0x4: 'gold',
        0x5: 'green',
    }
    if color_mapping is not None:
        clr_mapping.update(color_mapping)
    border = get_border(version, border)
    width, height = get_symbol_size(version, scale, border)
    # Number of modules per side (unscaled, without quiet zone).
    matrix_size = get_symbol_size(version, scale=1, border=0)[0]
    with writable(out, 'wt', encoding='utf-8') as f:
        legend = []
        write = f.write
        write('<?xml version="1.0" encoding="utf-8"?>\n')
        write('<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 {0} {1}">'.format(width, height))
        write('<style type="text/css"><![CDATA[ text { font-size: 1px; font-family: Helvetica, Arial, sans; } ]]></style>')
        # Scale the whole drawing instead of sizing each rect.
        write('<g transform="scale({0})">'.format(scale))
        for i in range(matrix_size):
            y = i + border
            for j in range(matrix_size):
                x = j + border
                bit = matrix[i][j]
                # Collect non-binary module values to annotate afterwards.
                if add_legend and bit not in (0x0, 0x1):
                    legend.append((x, y, bit))
                fill = clr_mapping.get(bit, fallback_color)
                write('<rect x="{0}" y="{1}" width="1" height="1" fill="{2}"/>'.format(x, y, fill))
        # legend may be empty if add_legend == False
        for x, y, val in legend:
            # Slight offsets center the label inside its module square.
            write('<text x="{0}" y="{1}">{2}</text>'.format(x + .2, y + .9, val))
        write('</g></svg>\n')
def split(input_layer, split_dim=0, num_splits=2):
    """Splits this Tensor along the split_dim into num_splits equal chunks.

    Examples:

    * `[1, 2, 3, 4] -> [1, 2], [3, 4]`
    * `[[1, 1], [2, 2], [3, 3], [4, 4]] -> [[1, 1], [2, 2]], [[3, 3], [4, 4]]`

    Args:
      input_layer: The chainable object, supplied.
      split_dim: The dimension to split along. Defaults to batch.
      num_splits: The number of splits.

    Returns:
      A list of PrettyTensors.

    Raises:
      ValueError: If split_dim is out of range or isn't divided evenly by
        num_splits.
    """
    # Validate the requested split before handing off to TensorFlow.
    _check_split_dims(num_splits, split_dim, input_layer.shape)
    pieces = tf.split(value=input_layer, num_or_size_splits=num_splits,
                      axis=split_dim)
    # Wrap the raw tensors back into the chainable sequence type.
    return input_layer.with_sequence(pieces)
def create_hit(self, title, description, keywords, reward, duration_hours,
               lifetime_days, ad_url, notification_url, approve_requirement,
               max_assignments, us_only, blacklist=None, annotation=None):
    """Create the actual HIT and return a dict with its useful properties."""
    frame_height = 600
    question = self._external_question(ad_url, frame_height)
    qualifications = self.build_hit_qualifications(
        approve_requirement, us_only, blacklist)
    # A HIT Type is required in order to register for REST notifications.
    hit_type_id = self.register_hit_type(
        title, description, reward, duration_hours, keywords, qualifications)
    self.set_rest_notification(notification_url, hit_type_id)
    lifetime_seconds = int(
        datetime.timedelta(days=lifetime_days).total_seconds())
    request = {
        "HITTypeId": hit_type_id,
        "Question": question,
        "LifetimeInSeconds": lifetime_seconds,
        "MaxAssignments": max_assignments,
        "UniqueRequestToken": self._request_token(),
    }
    if annotation:
        request["RequesterAnnotation"] = annotation
    response = self.mturk.create_hit_with_hit_type(**request)
    if "HIT" not in response:
        raise MTurkServiceException("HIT request was invalid for unknown reason.")
    return self._translate_hit(response["HIT"])
def logp_plus_loglike(self):
    '''The summed log-probability of all stochastic variables that depend on
    self.stochastics, and self.stochastics.

    :returns: the joint log-probability of the Markov blanket.
    '''
    # NOTE: local was previously named `sum`, shadowing the builtin.
    total = logp_of_set(self.markov_blanket)
    if self.verbose > 2:
        print_('\t' + self._id +
               ' Current log-likelihood plus current log-probability', total)
    return total
def get_events(self):
    """Returns a list of all joystick events that have occurred since the last
    call to `get_events`. The list contains events in the order that they
    occurred. If no events have occurred in the intervening time, the
    result is an empty list.
    """
    events = []
    # Poll with a zero timeout so we drain pending events without blocking.
    while self._wait(0):
        evt = self._read()
        # _read may yield a falsy value (no decodable event); skip those.
        if evt:
            events.append(evt)
    return events
def has_readonly(self, s):
    """Tests whether store `s` is read-only.

    A store is read-only when no transition changes its contents, i.e.
    every transition's left-hand side equals its right-hand side at `s`.
    """
    return all(list(t.lhs[s]) == list(t.rhs[s]) for t in self.transitions)
def check_import():
    """Try to import the aeneas package and return ``True`` if that fails."""
    try:
        import aeneas  # noqa: F401 -- imported purely to test availability
        print_success(u"aeneas OK")
        return False
    except ImportError:
        print_error(u"aeneas ERROR")
        # Walk the user through the two most common causes of this failure.
        for msg in (
            u" Unable to load the aeneas Python package",
            u" This error is probably caused by:",
            u" A. you did not download/git-clone the aeneas package properly; or",
            u" B. you did not install the required Python packages:",
            u" 1. BeautifulSoup4",
            u" 2. lxml",
            u" 3. numpy",
        ):
            print_info(msg)
    except Exception as e:
        print_error(e)
    # Any failure path (ImportError or unexpected error) reports failure.
    return True
def require_json():
    """Load the best available json library on demand."""
    # Prefer the stdlib module; fall back to simplejson. Raises only when
    # "json" is missing and "simplejson" is not installed either.
    try:
        # pylint: disable=F0401
        import json
    except ImportError:
        pass
    else:
        return json
    try:
        # pylint: disable=F0401
        import simplejson
    except ImportError as exc:
        raise ImportError("""Please 'pip install "simplejson>=2.1.6"' (%s)""" % (exc,))
    return simplejson
def _get_name_from_content_type(self, request):
    """Get name from Content-Type header"""
    header = request.META.get('CONTENT_TYPE', None)
    if not header:
        # Header absent (or empty string, which is falsy): nothing to derive.
        return None
    # Remove the possible charset-encoding info before returning the name.
    return util.strip_charset(header)
def get_supported_boot_mode(self):
    """Retrieves the supported boot mode.

    :returns: any one of the following proliantutils.ilo.constants:

        SUPPORTED_BOOT_MODE_LEGACY_BIOS_ONLY,
        SUPPORTED_BOOT_MODE_UEFI_ONLY,
        SUPPORTED_BOOT_MODE_LEGACY_BIOS_AND_UEFI
    """
    system = self._get_host_details()
    hp_oem = system['Oem']['Hp']
    # 0 is the fallback value mapped to the bios_only boot mode when the
    # system data exposes no UefiClass field.
    uefi_class_value = 0
    if 'Bios' in hp_oem and 'UefiClass' in hp_oem['Bios']:
        uefi_class_value = hp_oem['Bios']['UefiClass']
    return mappings.GET_SUPPORTED_BOOT_MODE_RIS_MAP.get(uefi_class_value)
def main(self, c):
    """:type c: Complex
    :rtype: Sfix"""
    # Multiply the input by its own conjugate, then take the angle of the
    # product and scale it by GAIN_SFIX. Result is latched on self.y.
    conjugated = self.conjugate.main(c)
    product = self.complex_mult.main(c, conjugated)
    raw_angle = self.angle.main(product)
    self.y = self.GAIN_SFIX * raw_angle
    return self.y
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.