signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def _dgram_send_and_receive ( self , _socket , message , buffer_size = 4096 , * args ) :
"""User Datagram Protocol sender and receiver"""
|
_socket . send ( message . encode ( 'utf-8' ) )
data , address = _socket . recvfrom ( buffer_size )
return BytesIO ( data )
|
def ndim(self) -> int:
    """Return the number of dimensions, computing and caching it on first use.

    Falls back to 0 when there are no variables; otherwise uses the
    dimensionality of the first variable.

    Reworked from the original try/assert/finally form: ``assert`` is
    stripped under ``python -O`` and ``return`` inside ``finally`` swallows
    exceptions, so plain attribute inspection is used instead.
    """
    # Cached value may be absent (first call) or explicitly None; recompute then.
    if getattr(self, '_ndim', None) is None:
        if len(self.variables) == 0:
            self._ndim = 0
        else:
            # Assumes all variables share the same dimensionality -- TODO confirm.
            self._ndim = self.variables[0].ndim
    return self._ndim
|
def digest(self):
    '''Return the accumulated hex digest as a native string, with a
    trailing ``os.linesep`` appended.

    :return: digest string
    '''
    hex_digest = self.__digest.hexdigest()
    return salt.utils.stringutils.to_str(hex_digest + os.linesep)
|
def load_module(self, module):
    """Create and return a test suite containing all cases loaded from the
    provided module.

    Parameters
    ----------
    module : module
        A module object containing ``TestCases``.
    """
    loaded = [
        self.load_case(case)
        for case in self.get_test_cases_from_module(module)
    ]
    return self.create_suite(loaded)
|
def print_summary(self, strm):
    """Print a lint summary for all tracked file maps to *strm*.

    :param strm: writable stream the summary is written to
    :return: total number of files that failed lint
    """
    nerr = 0
    nerr += LintHelper._print_summary_map(strm, self.cpp_header_map, 'cpp-header')
    # Fixed display-label typo: was 'cpp-soruce'.
    nerr += LintHelper._print_summary_map(strm, self.cpp_src_map, 'cpp-source')
    nerr += LintHelper._print_summary_map(strm, self.python_map, 'python')
    if nerr == 0:
        strm.write('All passed!\n')
    else:
        strm.write('%d files failed lint\n' % nerr)
    return nerr
|
def restore_sampler(fname):
    """Create a new sampler from an hdf5 database.

    :param fname: path to the hdf5 file holding a pickled ``__sampler__`` node
    :return: the unpickled sampler object
    """
    import pickle
    hf = tables.open_file(fname)
    try:
        # NOTE(security): pickle.load can execute arbitrary code; only
        # restore samplers from trusted files.
        sampler = pickle.load(hf.root.__sampler__)
    finally:
        # The original leaked the hdf5 handle; close it once the payload
        # has been fully read.
        hf.close()
    return sampler
|
def configure_attributes(self, json_data):
    """Configure load balancer attributes such as idle timeout, connection draining, etc.

    Args:
        json_data (json): return data from ELB upsert

    Reads overrides from ``self.properties['elb']`` and applies them via a
    single ``modify_load_balancer_attributes`` call per job entry.
    """
    env = boto3.session.Session(profile_name=self.env, region_name=self.region)
    elbclient = env.client('elb')
    elb_settings = self.properties['elb']
    LOG.debug('Block ELB Settings Pre Configure Load Balancer Attributes:\n%s', pformat(elb_settings))
    # FIXME: Determine why 'job' is not being used
    # pylint: disable=unused-variable
    for job in json.loads(json_data)['job']:
        # Baseline attributes used when no override is present in the settings.
        load_balancer_attributes = {'CrossZoneLoadBalancing': {'Enabled': True}, 'AccessLog': {'Enabled': False, }, 'ConnectionDraining': {'Enabled': False, }, 'ConnectionSettings': {'IdleTimeout': 60}}
        if elb_settings.get('connection_draining_timeout'):
            connection_draining_timeout = int(elb_settings['connection_draining_timeout'])
            LOG.info('Applying Custom Load Balancer Connection Draining Timeout: %d', connection_draining_timeout)
            load_balancer_attributes['ConnectionDraining'] = {'Enabled': True, 'Timeout': connection_draining_timeout}
        if elb_settings.get('idle_timeout'):
            idle_timeout = int(elb_settings['idle_timeout'])
            LOG.info('Applying Custom Load Balancer Idle Timeout: %d', idle_timeout)
            load_balancer_attributes['ConnectionSettings'] = {'IdleTimeout': idle_timeout}
        if elb_settings.get('access_log'):
            access_log_bucket_name = elb_settings['access_log']['bucket_name']
            access_log_bucket_prefix = elb_settings['access_log']['bucket_prefix']
            access_log_emit_interval = int(elb_settings['access_log']['emit_interval'])
            LOG.info('Applying Custom Load Balancer Access Log: %s/%s every %d minutes', access_log_bucket_name, access_log_bucket_prefix, access_log_emit_interval)
            load_balancer_attributes['AccessLog'] = {'Enabled': True, 'S3BucketName': access_log_bucket_name, 'EmitInterval': access_log_emit_interval, 'S3BucketPrefix': access_log_bucket_prefix}
        LOG.info('Applying Load Balancer Attributes')
        LOG.debug('Load Balancer Attributes:\n%s', pformat(load_balancer_attributes))
        elbclient.modify_load_balancer_attributes(LoadBalancerName=self.app, LoadBalancerAttributes=load_balancer_attributes)
|
def insert_after(self, sibling, row=None):
    """Insert a new row after *sibling* and return its :obj:`Gtk.TreeIter`.

    :param sibling: a valid :obj:`Gtk.TreeIter`, or :obj:`None` to prepend
        the row at the beginning of the list
    :param row: a list of values to apply to the newly inserted row, or
        :obj:`None` to leave it empty (fill it later with
        :obj:`Gtk.ListStore.set`\\() or :obj:`Gtk.ListStore.set_value`\\())
    :returns: :obj:`Gtk.TreeIter` pointing to the new row
    :rtype: :obj:`Gtk.TreeIter`
    """
    new_iter = Gtk.ListStore.insert_after(self, sibling)
    if row is None:
        return new_iter
    self.set_row(new_iter, row)
    return new_iter
|
def send_no_servlet_response(self):
    """Send the default 404 page used when no servlet matches the request path."""
    not_found_page = self._service.make_not_found_page(self.path)
    # Use the response helper to emit the error page.
    _HTTPServletResponse(self).send_content(404, not_found_page)
|
def get_natural_key_fields(cls):
    """Determine the actual natural key field list, flattening the natural
    keys of related objects into ``related__field`` style names."""
    fields = []
    for name, rel_to in cls.get_natural_key_info():
        if rel_to:
            # Related model: prefix each nested key field with this field name.
            fields.extend(name + '__' + nested for nested in rel_to.get_natural_key_fields())
        else:
            fields.append(name)
    return fields
|
def safe_to_write(self, regs, i=0, end_=0):
    """Tell whether the given register(s) can be safely modified within the
    block span.

    :param regs: register or iterable of registers (8 or 16 bit ones)
    :param i: initial position of the block to examine
    :param end_: final position to examine (defaults to the end of the block)
    :returns: True when none of the given registers is required by the block
        between ``i`` and ``end_`` (note: this is a boolean, not the list the
        original docstring described)
    """
    if is_register(regs):
        regs = set(single_registers(regs))
    else:
        # NOTE(review): this builds a set *of* the results of
        # single_registers(x), not a flattened union of them -- if
        # single_registers returned a list this would raise (unhashable), so
        # presumably it yields hashable values here. TODO confirm.
        regs = set(single_registers(x) for x in regs)
    return not regs.intersection(self.requires(i, end_))
|
def set_contents_from_file(self, fp, headers=None, replace=True, cb=None, num_cb=10, policy=None, md5=None):
    """Store an object in a file using the name of the Key object as the
    key in file URI and the contents of the file pointed to by 'fp' as the
    contents.

    :type fp: file
    :param fp: the file whose contents to upload

    :type headers: dict
    :param headers: ignored in this subclass.

    :type replace: bool
    :param replace: If this parameter is False, the method will first check
        to see if an object exists in the bucket with the same key.  If it
        does, it won't overwrite it.  The default value is True which will
        overwrite the object.

    :type cb: function
    :param cb: ignored in this subclass.

    :type num_cb: int
    :param num_cb: ignored in this subclass.

    :type policy: :class:`boto.s3.acl.CannedACLStrings`
    :param policy: ignored in this subclass.

    :type md5: A tuple containing the hexdigest version of the MD5 checksum
        of the file as the first element and the Base64-encoded version of
        the plain checksum as the second element.  This is the same format
        returned by the compute_md5 method.
    :param md5: ignored in this subclass.
    """
    if self.key_type & self.KEY_STREAM_WRITABLE:
        # A write-stream key cannot be written to again.
        raise BotoClientError('Stream is not writable')
    elif self.key_type & self.KEY_STREAM_READABLE:
        # Readable-stream key: copy straight into the underlying stream.
        key_file = self.fp
    else:
        # Plain file key: honor replace=False by skipping existing files.
        if not replace and os.path.exists(self.full_path):
            return
        key_file = open(self.full_path, 'wb')
    try:
        shutil.copyfileobj(fp, key_file)
    finally:
        # NOTE(review): in the KEY_STREAM_READABLE branch this closes
        # self.fp as well -- presumably intentional; confirm before changing.
        key_file.close()
|
def print_commandless_help(self):
    """Print ``self.m_doc`` as colorized commandless help.

    The first docstring line is treated as the title; when an ``author :``
    marker is present, the remainder is split around it and the author
    portion is printed in a dimmer color.
    """
    doc_help = self.m_doc.strip().split("\n")
    # str.split always yields at least one element, so this condition is
    # always true; the else branch below is unreachable as written.
    if len(doc_help) > 0:
        print("\033[33m--\033[0m")
        print("\033[34m" + doc_help[0] + "\033[0m")
        asp = "author :"
        doc_help_rest = "\n".join(doc_help[1:])
        if asp in doc_help_rest:
            doc_help_rest = doc_help_rest.split(asp)
            if len(doc_help_rest) > 1:
                print("\n\033[33m" + doc_help_rest[0].strip() + "\n")
                print("\033[37m" + asp + doc_help_rest[1] + "\033[0m")
            else:
                print(doc_help_rest)
        else:
            print(doc_help_rest)
        print("\033[33m--\033[0m")
    else:
        # Fixed typo in the error message ('then' -> 'than').
        print("\033[31mERROR, doc should have more than one line\033[0m")
        print(self.m_doc)
|
def get_sidecar_container(job_container_name, sidecar_container_name, sidecar_docker_image, sidecar_docker_image_pull_policy, namespace, sidecar_config, sidecar_args, internal_health_check_url, volume_mounts, env_vars=None):
    """Build and return the pod sidecar container spec.

    Combines caller-provided env vars, the standard sidecar env vars, and
    one env var per ``sidecar_config`` entry.
    """
    combined_env = to_list(env_vars) if env_vars else []
    combined_env += get_sidecar_env_vars(namespace=namespace, job_container_name=job_container_name, internal_health_check_url=internal_health_check_url)
    for key, value in sidecar_config.items():
        combined_env.append(get_env_var(name=key, value=value))
    return client.V1Container(
        name=sidecar_container_name,
        image=sidecar_docker_image,
        image_pull_policy=sidecar_docker_image_pull_policy,
        command=get_sidecar_command(),
        env=combined_env,
        volume_mounts=volume_mounts,
        args=sidecar_args)
|
def set_dword_at_offset(self, offset, dword):
    """Write *dword*, packed as bytes, at the given file offset."""
    packed = self.get_data_from_dword(dword)
    return self.set_bytes_at_offset(offset, packed)
|
def append(self, record):
    """Add the passed *record* so that it satisfies the query.

    Only intended to be used in conjunction with associations (i.e. do not
    use if ``self.record`` is None).

    Intended use case (DO THIS)::

        post.comments.append(comment)

    NOT THIS::

        Query(Post).where(content="foo").append(post)
    """
    if self.record:
        self._validate_record(record)
        if self.join_args:
            # As always, the related record is created when the primary
            # record is saved.
            build_args = dict(self.where_query)
            # The final join is what connects the record chain to the
            # passed record.
            final_join = self.join_args[-2]
            # We don't need to worry about one-to-many "through" because
            # there is not enough information to find or create the joining
            # record.  E.g. in the Forum -> Thread -> Post example,
            # forum.posts.append(post) doesn't make sense since there is no
            # information about which thread the post will be attached to.
            # Thus, this only makes sense on many-to-many.  BUT we still
            # have to consider the case where there is a one-many-many; to
            # make that work, we treat this like building.
            joining_relation = getattr(self.record, final_join['table'])
            # Use the lookup info in the join to figure out which ids to
            # set, and where to get the id value from.
            joining_args = {final_join['on'][0]: getattr(record, final_join['on'][1])}
            build_args.update(joining_args)
            joining_record = joining_relation.build(**build_args)
            self.record._related_records.append(joining_record)
        else:
            # Add our id to their foreign key so that the relationship is
            # created, then track the record so it is saved when we are.
            setattr(record, foreign_key(record, self.record), self.record.id)
            self.record._related_records.append(record)
|
def center_middle(r, window_size):
    """Center a region on its midpoint and expand it to *window_size* bases.

    :param r: region object with integer ``start``/``end`` and ``__len__``
    :param window_size: desired width of the resulting region
    :return: the new (shallow-copied) region; *r* itself is left untouched
    """
    res = copy.copy(r)
    # Use floor division: the original '/' produced float coordinates under
    # Python 3, which is wrong for integer base positions.
    mid = res.start + len(res) // 2
    res.start = mid - window_size // 2
    res.end = res.start + window_size
    return res
|
def _walk_to_root ( path ) :
"""Yield directories starting from the given directory up to the root"""
|
if not os . path . exists ( path ) :
raise IOError ( 'Starting path not found' )
if os . path . isfile ( path ) :
path = os . path . dirname ( path )
last_dir = None
current_dir = os . path . abspath ( path )
while last_dir != current_dir :
yield current_dir
parent_dir = os . path . abspath ( os . path . join ( current_dir , os . path . pardir ) )
last_dir , current_dir = current_dir , parent_dir
|
def route(self, path: str, methods: Optional[List[str]] = None, endpoint: Optional[str] = None, defaults: Optional[dict] = None, host: Optional[str] = None, subdomain: Optional[str] = None, *, provide_automatic_options: Optional[bool] = None, strict_slashes: bool = True, ) -> Callable:
    """Add a route to the blueprint.

    Designed to be used as a decorator, with the same arguments as
    :meth:`~quart.Quart.route`. Example usage:

    .. code-block:: python

        blueprint = Blueprint(__name__)

        @blueprint.route('/')
        def route():
            ...
    """
    def wrapper(func: Callable) -> Callable:
        # Register immediately; return the function unchanged so it can
        # still be called directly.
        self.add_url_rule(
            path,
            endpoint,
            func,
            methods,
            defaults=defaults,
            host=host,
            subdomain=subdomain,
            provide_automatic_options=provide_automatic_options,
            strict_slashes=strict_slashes,
        )
        return func
    return wrapper
|
def set_default_fig_style(self):
    '''Apply the default square figure size derived from the frontiers width.'''
    side = self.frontierswidth / self.inchpercm
    plt.rcParams.update({'figure.figsize': [side, side]})
|
def caution_title_header_element(feature, parent):
    """Retrieve the capitalized caution title header string from definitions."""
    # Expression-engine callback signature; both arguments are unused.
    _ = feature, parent
    return caution_title_header['string_format'].capitalize()
|
def refresh_win(self, resizing=False):
    """Redraw the window background, frame and centered title, then refresh
    the current selection.

    :param resizing: forwarded to ``refresh_selection``; True while the
        terminal is being resized.
    """
    # self.init_window(set_encoding)
    self._win.bkgdset(' ', curses.color_pair(3))
    self._win.erase()
    self._win.box()
    # Center the title on the top border line.
    self._win.addstr(0, int((self.maxX - len(self._title)) / 2), self._title, curses.color_pair(4))
    self.refresh_selection(resizing)
|
def _expand_probes ( probes , defaults ) :
'''Updates the probes dictionary with different levels of default values .'''
|
expected_probes = { }
for probe_name , probe_test in six . iteritems ( probes ) :
if probe_name not in expected_probes . keys ( ) :
expected_probes [ probe_name ] = { }
probe_defaults = probe_test . pop ( 'defaults' , { } )
for test_name , test_details in six . iteritems ( probe_test ) :
test_defaults = test_details . pop ( 'defaults' , { } )
expected_test_details = deepcopy ( defaults )
# copy first the general defaults
expected_test_details . update ( probe_defaults )
# update with more specific defaults if any
expected_test_details . update ( test_defaults )
# update with the most specific defaults if possible
expected_test_details . update ( test_details )
# update with the actual config of the test
if test_name not in expected_probes [ probe_name ] . keys ( ) :
expected_probes [ probe_name ] [ test_name ] = expected_test_details
return expected_probes
|
def returnTradeHistoryPublic(self, currencyPair, start=None, end=None):
    """Return the past 200 trades for a given market, or up to 50,000 trades
    between a range specified in UNIX timestamps by the "start" and "end"
    GET parameters.

    Thin public alias delegating to the parent class implementation.
    """
    return super(Poloniex, self).returnTradeHistory(currencyPair, start, end)
|
def roster(team_id):
    """Return a dictionary of 40-man roster information for *team_id*."""
    raw = mlbgame.data.get_roster(team_id).read().decode('utf-8')
    parsed = json.loads(raw)
    return {
        'players': parsed['roster_40']['queryResults']['row'],
        'team_id': team_id,
    }
|
def get_cutout(self, resource, resolution, x_range, y_range, z_range, time_range, id_list, url_prefix, auth, session, send_opts, access_mode=CacheMode.no_cache, **kwargs):
    """Download a cutout from the Boss data store.

    Args:
        resource (intern.resource.resource.Resource): Resource compatible with cutout operations.
        resolution (int): 0 indicates native resolution.
        x_range (list[int]): x range such as [10, 20] which means x >= 10 and x < 20.
        y_range (list[int]): y range such as [10, 20] which means y >= 10 and y < 20.
        z_range (list[int]): z range such as [10, 20] which means z >= 10 and z < 20.
        time_range ([list[int]]|None): time range such as [30, 40] which means t >= 30 and t < 40.
        id_list (list[int]): list of object ids to filter the cutout by.
        url_prefix (string): Protocol + host such as https://api.theboss.io
        auth (string): Token to send in the request header.
        session (requests.Session): HTTP session to use for request.
        send_opts (dictionary): Additional arguments to pass to session.send().
        access_mode (optional[Enum]): Identifies one of three cache access options:
            cache = Will check both cache and for dirty keys
            no_cache = Will skip cache check but check for dirty keys
            raw = Will skip both the cache and dirty keys check
        chunk_size (optional Tuple[int, int, int]): The chunk size to request.

    Returns:
        (numpy.array): A 3D or 4D numpy matrix in ZYX(time) order.

    Raises:
        requests.HTTPError
    """
    chunk_size = kwargs.pop("chunk_size", (512, 512, 16 * 8))
    # TODO: magic number -- allow up to 20% over one chunk before splitting.
    chunk_limit = (chunk_size[0] * chunk_size[1] * chunk_size[2]) * 1.2
    # Check to see if this volume is larger than a single request. If so,
    # chunk it into several smaller bites:
    if time_range:
        cutout_size = ((x_range[1] - x_range[0]) * (y_range[1] - y_range[0]) * (z_range[1] - z_range[0]) * (time_range[1] - time_range[0]))
    else:
        cutout_size = ((x_range[1] - x_range[0]) * (y_range[1] - y_range[0]) * (z_range[1] - z_range[0]))
    if cutout_size > chunk_limit:
        blocks = block_compute(x_range[0], x_range[1], y_range[0], y_range[1], z_range[0], z_range[1], block_size=chunk_size)
        # NOTE(review): the pre-allocated result is 3D even when time_range
        # is set -- confirm the chunked path is only used without time.
        result = np.ndarray((z_range[1] - z_range[0], y_range[1] - y_range[0], x_range[1] - x_range[0]), dtype=resource.datatype)
        for b in blocks:
            # Recurse per sub-block; each is small enough for the direct path.
            _data = self.get_cutout(resource, resolution, b[0], b[1], b[2], time_range, id_list, url_prefix, auth, session, send_opts, access_mode, **kwargs)
            result[b[2][0] - z_range[0]:b[2][1] - z_range[0], b[1][0] - y_range[0]:b[1][1] - y_range[0], b[0][0] - x_range[0]:b[0][1] - x_range[0]] = _data
        return result
    req = self.get_cutout_request(resource, 'GET', 'application/blosc', url_prefix, auth, resolution, x_range, y_range, z_range, time_range, access_mode=access_mode, id_list=id_list, **kwargs)
    prep = session.prepare_request(req)
    # Hack in Accept header for now.
    prep.headers['Accept'] = 'application/blosc'
    resp = session.send(prep, **send_opts)
    if resp.status_code == 200:
        raw_data = blosc.decompress(resp.content)
        # np.fromstring is deprecated; frombuffer + copy keeps the returned
        # array writable, as fromstring was.
        data_mat = np.frombuffer(raw_data, dtype=resource.datatype).copy()
        if time_range:
            # Reshape including time.
            return np.reshape(data_mat, (time_range[1] - time_range[0], z_range[1] - z_range[0], y_range[1] - y_range[0], x_range[1] - x_range[0]), order='C')
        else:
            # Reshape without including time.
            return np.reshape(data_mat, (z_range[1] - z_range[0], y_range[1] - y_range[0], x_range[1] - x_range[0]), order='C')
    msg = ('Get cutout failed on {}, got HTTP response: ({}) - {}'.format(resource.name, resp.status_code, resp.text))
    raise HTTPError(msg, request=req, response=resp)
|
def get_grid_district_polygon(config, subst_id=None, projection=4326):
    """Get MV grid district polygon(s) from oedb for plotting.

    :param config: config mapping with 'db_connection' and 'data_source'
        sections (and 'versioned' when the versioned schema is used)
    :param subst_id: substation id filter. NOTE(review): the versioned
        branch compares with ``==`` while the model_draft branch uses
        ``.in_()``, so the expected type differs (scalar vs iterable) --
        confirm against callers.
    :param projection: EPSG code to reproject the result to (default 4326)
    :return: GeoDataFrame with columns ['subst_id', 'geometry']
    """
    # make DB session
    conn = connection(section=config['db_connection']['section'])
    Session = sessionmaker(bind=conn)
    session = Session()
    # get polygon from versioned schema
    if config['data_source']['oedb_data_source'] == 'versioned':
        version = config['versioned']['version']
        query = session.query(EgoDpMvGriddistrict.subst_id, EgoDpMvGriddistrict.geom)
        Regions = [(subst_id, shape.to_shape(geom)) for subst_id, geom in query.filter(EgoDpMvGriddistrict.version == version, EgoDpMvGriddistrict.subst_id == subst_id).all()]
    # get polygon from model_draft
    else:
        query = session.query(EgoGridMvGriddistrict.subst_id, EgoGridMvGriddistrict.geom)
        Regions = [(subst_id, shape.to_shape(geom)) for subst_id, geom in query.filter(EgoGridMvGriddistrict.subst_id.in_(subst_id)).all()]
    # Source geometries are stored in EPSG:3035; reproject to the requested CRS.
    crs = {'init': 'epsg:3035'}
    region = gpd.GeoDataFrame(Regions, columns=['subst_id', 'geometry'], crs=crs)
    region = region.to_crs(epsg=projection)
    return region
|
def with_aad_user_password_authentication(cls, connection_string, user_id, password, authority_id="common"):
    """Create a KustoConnectionStringBuilder that authenticates with AAD
    user name and password.

    :param str connection_string: Kusto connection string, e.g.
        https://<clusterName>.kusto.windows.net
    :param str user_id: AAD user ID.
    :param str password: corresponding password of the AAD user.
    :param str authority_id: optional param, defaults to "common".
    """
    _assert_value_is_valid(user_id)
    _assert_value_is_valid(password)
    builder = cls(connection_string)
    keywords = builder.ValidKeywords
    builder[keywords.aad_federated_security] = True
    builder[keywords.aad_user_id] = user_id
    builder[keywords.password] = password
    builder[keywords.authority_id] = authority_id
    return builder
|
def delete_dev_vlans(vlanid, auth, url, devid=None, devip=None):
    """Remove the specified 802.1q VLAN from the target device via the IMC
    REST interface.

    :param vlanid: int or str value of target 802.1q VLAN
    :param auth: requests auth object  # usually auth.creds from pyhpeimc.auth
    :param url: base url of IMC RS interface  # usually auth.url
    :param devid: str devid of the target device
    :param devip: str ipv4 address of the target device (looked up to a
        devid when provided)
    :return: HTTP status code (204 on success, 409 when the VLAN is absent
        or unsupported), None for other status codes, or an error string
        when the request itself fails
    :rtype: requests.models.Response status code (int) or str

    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.vlanm import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> create_dev_vlan = create_dev_vlan('350', '200', 'test vlan', auth.creds, auth.url)
    """
    if devip is not None:
        devid = get_dev_details(devip, auth, url)['id']
    remove_dev_vlan_url = "/imcrs/vlan/delvlan?devId=" + str(devid) + "&vlanId=" + str(vlanid)
    f_url = url + remove_dev_vlan_url
    try:
        # The HTTP call is what can raise RequestException; the original
        # issued it outside the try block, making the handler unreachable.
        response = requests.delete(f_url, auth=auth, headers=HEADERS)
        if response.status_code == 204:
            print('Vlan deleted')
            return response.status_code
        elif response.status_code == 409:
            print('Unable to delete VLAN.\nVLAN does not Exist\nDevice does not support VLAN ' 'function')
            return response.status_code
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " delete_dev_vlans: An Error has occured"
|
def symmetric_difference(self, other, result_name=None, sort=None):
    """Compute the symmetric difference of two Index objects.

    Parameters
    ----------
    other : Index or array-like
    result_name : str
    sort : False or None, default None
        Whether to sort the resulting index. By default, the values are
        attempted to be sorted, but any TypeError from incomparable
        elements is caught by pandas.

        * None : Attempt to sort the result, but catch any TypeErrors
          from comparing incomparable elements.
        * False : Do not sort the result.

        .. versionadded:: 0.24.0
        .. versionchanged:: 0.24.1
           Changed the default value from ``True`` to ``None``
           (without change in behaviour).

    Returns
    -------
    symmetric_difference : Index

    Notes
    -----
    ``symmetric_difference`` contains elements that appear in either
    ``idx1`` or ``idx2`` but not both. Equivalent to the Index created by
    ``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates
    dropped.

    Examples
    --------
    >>> idx1 = pd.Index([1, 2, 3, 4])
    >>> idx2 = pd.Index([2, 3, 4, 5])
    >>> idx1.symmetric_difference(idx2)
    Int64Index([1, 5], dtype='int64')

    You can also use the ``^`` operator:

    >>> idx1 ^ idx2
    Int64Index([1, 5], dtype='int64')
    """
    self._validate_sort_keyword(sort)
    self._assert_can_do_setop(other)
    other, result_name_update = self._convert_can_do_setop(other)
    if result_name is None:
        result_name = result_name_update
    # Work on de-duplicated views of both indexes.
    this = self._get_unique_index()
    other = other._get_unique_index()
    indexer = this.get_indexer(other)
    # {this} minus {other}: keep positions of `this` with no match in `other`.
    common_indexer = indexer.take((indexer != -1).nonzero()[0])
    left_indexer = np.setdiff1d(np.arange(this.size), common_indexer, assume_unique=True)
    left_diff = this.values.take(left_indexer)
    # {other} minus {this}: positions of `other` that found no match (-1).
    right_indexer = (indexer == -1).nonzero()[0]
    right_diff = other.values.take(right_indexer)
    the_diff = _concat._concat_compat([left_diff, right_diff])
    if sort is None:
        try:
            the_diff = sorting.safe_sort(the_diff)
        except TypeError:
            # Incomparable element types: leave unsorted, per sort=None.
            pass
    attribs = self._get_attributes_dict()
    attribs['name'] = result_name
    if 'freq' in attribs:
        # A symmetric difference generally breaks any regular frequency.
        attribs['freq'] = None
    return self._shallow_copy_with_infer(the_diff, **attribs)
|
def future_datetime(end='+30d'):
    """Return a lazy factory producing a ``datetime`` in the future (that is,
    1 second from now) up to the specified ``end``.

    ``end`` can be a string, another datetime, or a timedelta. If it's a
    string, it must start with ``+``, followed by an integer and a unit,
    e.g. ``'+30d'``. Defaults to ``'+30d'``.

    Valid units are:
    * ``'years'``, ``'y'``
    * ``'weeks'``, ``'w'``
    * ``'days'``, ``'d'``
    * ``'hours'``, ``'hours'``
    * ``'minutes'``, ``'m'``
    * ``'seconds'``, ``'s'``
    """
    def make(n, faker):
        return faker.future_datetime(end_date=end, tzinfo=get_timezone())
    return make
|
def update_old_to_new(self, col, old_val, new_val):
    """Append an UPDATE statement that sets *col* to *new_val* wherever it
    currently equals *old_val*.

    e.g. update_old_to_new("NAME", "The University of Adelaide", "University of Adelaide")
    will generate
    UPDATE <fact_table> SET NAME = 'University of Adelaide' WHERE NAME = 'The University of Adelaide';

    NOTE(security): values are spliced into the SQL text unescaped; only use
    with trusted input.
    """
    statement = "UPDATE {table} SET {col} = '{new}' WHERE {col} = '{old}'; \n".format(
        table=self.fact_table, col=col, new=new_val, old=old_val)
    self.sql_text += statement
|
def has_children(self):
    """Check whether there are children tab widgets.

    :return: the first child splitter that itself has children, otherwise
        True/False depending on whether the main tab widget holds any tab.
    """
    for child in self.child_splitters:
        if child.has_children():
            return child
    return self.main_tab_widget.count() != 0
|
def kill(restriction=None, connection=None):  # pragma: no cover
    """View and kill database connections interactively.

    :param restriction: restriction to be applied to processlist
    :param connection: a datajoint.Connection object. Default calls datajoint.conn()

    Restrictions are specified as strings and can involve any of the
    attributes of information_schema.processlist:
    ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO.

    Examples:
        dj.kill('HOST LIKE "%compute%"') lists only connections from hosts containing "compute".
        dj.kill('TIME > 600') lists only connections older than 10 minutes.
    """
    if connection is None:
        connection = conn()
    # Exclude our own connection so we cannot kill ourselves.
    query = 'SELECT * FROM information_schema.processlist WHERE id <> CONNECTION_ID()' + ("" if restriction is None else ' AND (%s)' % restriction)
    # Loop until the user quits: show the (filtered) process list, then
    # prompt for a pid to kill.
    while True:
        print(' ID USER STATE TIME INFO')
        print('+--+ +----------+ +-----------+ +--+')
        cur = connection.query(query, as_dict=True)
        for process in cur:
            try:
                print('{ID:>4d} {USER:<12s} {STATE:<12s} {TIME:>5d} {INFO}'.format(**process))
            except TypeError:
                # Some fields may not match the format spec (e.g. None);
                # fall back to printing the raw dict.
                print(process)
        response = input('process to kill or "q" to quit > ')
        if response == 'q':
            break
        if response:
            try:
                pid = int(response)
            except ValueError:
                pass
            # ignore non-numeric input
            else:
                try:
                    connection.query('kill %d' % pid)
                except pymysql.err.InternalError:
                    print('Process not found')
|
def iter_merge(cls, timeseries_list):
    """Iterate through several time series in time order, yielding
    (time, list) tuples where list holds the value of each individual
    TimeSeries in the list at time t.
    """
    # Using return without an argument is the way to say "the iterator is
    # empty" when there is nothing to iterate over (the more you know...).
    if not timeseries_list:
        return
    # for ts in timeseries_list:
    #     if ts.is_floating():
    #         msg = "can't merge empty TimeSeries with no default value"
    #         raise KeyError(msg)
    # This function mostly wraps _iter_merge; the main point of this is to
    # deal with the case of tied times, where we only want to yield the
    # last list of values that occurs for any group of tied times.
    index, previous_t, previous_state = -1, object(), object()
    for index, (t, state) in enumerate(cls._iter_merge(timeseries_list)):
        if index > 0 and t != previous_t:
            yield previous_t, previous_state
        previous_t, previous_state = t, state
    # Only yield the final pair if _iter_merge produced at least one element.
    if index > -1:
        yield previous_t, previous_state
|
def is_visible(self, pos: Union[Point2, Point3, Unit]) -> bool:
    """Return True if you have vision on the given grid point.

    :param pos: a point or unit; reduced to its rounded 2D map coordinates.
    """
    # more info: https://github.com/Blizzard/s2client-proto/blob/9906df71d6909511907d8419b33acc1a3bd51ec0/s2clientprotocol/spatial.proto#L19
    assert isinstance(pos, (Point2, Point3, Unit))
    pos = pos.position.to2.rounded
    # Visibility value 2 means currently visible (see linked proto for the
    # full value meanings).
    return self.state.visibility[pos] == 2
|
def get_form_class(self):
    """Return a ``TranslatableModelForm`` by default if no form_class is set.

    If a subclass overrode ``get_form_class``, defer to that override;
    otherwise replicate ``ModelFormMixin.get_form_class`` but with
    ``TranslatableModelForm`` as the form base class.
    """
    super_method = super(TranslatableModelFormMixin, self).get_form_class
    # no "__func__" on the class level function in python 3
    default_method = getattr(ModelFormMixin.get_form_class, '__func__', ModelFormMixin.get_form_class)
    if not (super_method.__func__ is default_method):
        # Don't get in your way, if you've overwritten stuff.
        return super_method()
    else:
        # Same logic as ModelFormMixin.get_form_class, but using the right
        # form base class.
        if self.form_class:
            return self.form_class
        else:
            model = _get_view_model(self)
            if self.fields:
                fields = self.fields
                return modelform_factory(model, form=TranslatableModelForm, fields=fields)
            else:
                return modelform_factory(model, form=TranslatableModelForm)
|
def save_token(self, access_token):
    """Store an access token and its lookup keys in memory.

    :param access_token: an instance of :class:`oauth2.datatype.AccessToken`.
    :return: ``True``, always.
    """
    token = access_token.token
    self.access_tokens[token] = access_token
    identifier_key = self._unique_token_key(access_token.client_id, access_token.grant_type, access_token.user_id)
    self.unique_token_identifier[identifier_key] = token
    # Refresh tokens are indexed separately when present.
    if access_token.refresh_token is not None:
        self.refresh_tokens[access_token.refresh_token] = access_token
    return True
|
def parse_blocks(self, text):
    """Split markdown *text* into alternating non-code and code blocks.

    Returns a list of block dicts, each with at least the keys 'type'
    and 'content' ('markdown' or 'code'); code blocks may carry extra
    keys captured by ``self.code_pattern``'s named groups. Empty
    markdown blocks (e.g. between two adjacent code fences) are
    dropped.

    We should switch to an external markdown library if this gets much
    more complicated!
    """
    code_matches = list(self.code_pattern.finditer(text))
    # Non-code spans are everything between consecutive code matches,
    # plus the leading and trailing stretches of the document.
    starts = [0] + [m.end() for m in code_matches]
    stops = [m.start() for m in code_matches] + [len(text)]
    code_blocks = [self.new_code_block(**m.groupdict()) for m in code_matches]
    text_blocks = [self.new_text_block(content=text[a:b]) for a, b in zip(starts, stops)]
    for block in code_blocks:
        self.pre_process_code_block(block)   # remove indents
    for block in text_blocks:
        self.pre_process_text_block(block)   # trim surrounding blank lines
    # Interleave text, code, text, code, ... By construction there is
    # always exactly one more text block than code blocks, so strict
    # alternation holds; two adjacent code fences produce an empty text
    # block in between, which the final filter removes.
    interleaved = [None] * (len(text_blocks) + len(code_blocks))
    interleaved[::2] = text_blocks
    interleaved[1::2] = code_blocks
    return [block for block in interleaved if block['content']]
|
def nonlinear_tidal_spa(**kwds):
    """Generate a frequency-domain waveform that implements the
    TaylorF2+NL tide model described in https://arxiv.org/abs/1808.07013.

    Besides the usual TaylorF2 parameters, kwds must contain 'f0',
    'amplitude', 'n', 'mass1' and 'mass2' for the nonlinear-tide phasing.

    Returns (hp, hc): plus/cross polarization frequency series.
    """
    from pycbc import waveform
    from pycbc.types import Array

    # We start with the standard TaylorF2 based waveform; drop the
    # caller-supplied approximant so it cannot conflict with the explicit one.
    kwds.pop('approximant')
    hp, hc = waveform.get_fd_waveform(approximant="TaylorF2", **kwds)
    # Apply the phasing difference from the nonlinear tides as a
    # unit-modulus factor exp(-i*dphi(f)) on both polarizations.
    f = numpy.arange(len(hp)) * hp.delta_f
    pd = Array(numpy.exp(-1.0j * nltides_fourier_phase_difference(f, hp.delta_f, kwds['f0'], kwds['amplitude'], kwds['n'], kwds['mass1'], kwds['mass2'])), dtype=hp.dtype)
    hp *= pd
    hc *= pd
    return hp, hc
|
def count_num_trees(nexson, nexson_version=None):
    """Return the total number of trees summed across all tree groups."""
    if nexson_version is None:
        nexson_version = detect_nexson_version(nexson)
    nex = get_nexml_el(nexson)
    total = 0
    if _is_by_id_hbf(nexson_version):
        # "By id" form: trees live in nested treesById/treeById mappings.
        for group in nex.get('treesById', {}).values():
            total += len(group.get('treeById', {}))
    else:
        # Direct form: 'trees' may be a single dict or a list of groups,
        # and each group's 'tree' may be a single dict or a list.
        groups = nex.get('trees', [])
        if isinstance(groups, dict):
            groups = [groups]
        for group in groups:
            t = group.get('tree')
            total += len(t) if isinstance(t, list) else 1
    return total
|
def update(self):
    """Refresh the parking-brake state from the vehicle's drive parameters."""
    self._controller.update(self._id, wake_if_asleep=False)
    data = self._controller.get_drive_params(self._id)
    if not data:
        return
    # Parked when there is no shift state at all or the shifter reports 'P'.
    shift = data['shift_state']
    self.__state = (not shift) or shift == 'P'
|
def add_circles(self, ra_cen, dec_cen, radius, depth=None):
    """Add one or more circles to this region.

    Parameters
    ----------
    ra_cen, dec_cen, radius : float or list
        The center(s) and radius/radii of the circle(s) to add to this
        region. NOTE(review): when ra_cen/dec_cen are sequences, radius
        must apparently be a matching-length sequence too — a scalar
        radius would make the zip below yield nothing useful; confirm
        against callers.
    depth : int
        The depth at which the given circles will be inserted; omitted
        or too-deep values are clamped to ``self.maxdepth``.
    """
    if depth is None or depth > self.maxdepth:
        depth = self.maxdepth
    # Accept either scalars or sequences: zip() raises TypeError on
    # scalars, in which case we wrap them into one-element lists.
    try:
        sky = list(zip(ra_cen, dec_cen))
        rad = radius
    except TypeError:
        sky = [[ra_cen, dec_cen]]
        rad = [radius]
    sky = np.array(sky)
    rad = np.array(rad)
    vectors = self.sky2vec(sky)
    # Rasterize each circle to nested HEALPix pixels; inclusive=True makes
    # sure partially-covered edge pixels are included.
    for vec, r in zip(vectors, rad):
        pix = hp.query_disc(2 ** depth, vec, r, inclusive=True, nest=True)
        self.add_pixels(pix, depth)
    self._renorm()
    return
|
def line(ax, p1, p2, permutation=None, **kwargs):
    """Draw a straight line on *ax* from *p1* to *p2*.

    Parameters
    ----------
    ax : Matplotlib AxesSubplot
        The subplot to draw on.
    p1, p2 : 2-tuple
        The (x, y) start and end coordinates (projected before drawing).
    permutation :
        Optional axis permutation forwarded to ``project_point``.
    kwargs :
        Any kwargs to pass through to Matplotlib's ``Line2D``.
    """
    start = project_point(p1, permutation=permutation)
    end = project_point(p2, permutation=permutation)
    xs = (start[0], end[0])
    ys = (start[1], end[1])
    ax.add_line(Line2D(xs, ys, **kwargs))
|
def get_behave_args(self, argv=sys.argv):
    """Collect the command-line arguments meant for behave itself.

    Options the management-command parser does not recognize are passed
    through; those namespaced with ``--behave-`` are translated back to
    their native behave form (single-character names get a single dash).
    """
    parser = BehaveArgsHelper().create_parser('manage.py', 'behave')
    _, unknown = parser.parse_known_args(argv[2:])
    behave_args = []
    for option in unknown:
        # Strip our '--behave-' namespace prefix, if present.
        if option.startswith('--behave-'):
            bare = option.replace('--behave-', '', 1)
            option = ('-' if len(bare) == 1 else '--') + bare
        behave_args.append(option)
    return behave_args
|
def include_sqlalchemy_models(nc, Base):
    """Include all SQLAlchemy models in the script context.

    :param nc: notebook_context dictionary
    :param Base: SQLAlchemy declarative Base class from which all models inherit.
    """
    from sqlalchemy.ext.declarative.clsregistry import _ModuleMarker
    # Include all SQLAlchemy models in the local namespace
    for name, klass in Base._decl_class_registry.items():
        # NOTE(review): this print looks like leftover debug output — confirm
        # whether it is intentional.
        print(name, klass)
        if isinstance(klass, _ModuleMarker):
            # Registry bookkeeping entry, not an actual model class.
            continue
        add_script(nc, get_import_statement(klass))
        add_greeting(nc, "* **{}** - {}".format(klass.__name__, get_dotted_path(klass)))
|
def finish(self, value):
    """Resolve the future with *value* and fire everything waiting on it.

    :param value: the new value for the future
    :raises:
        :class:`AlreadyComplete <junction.errors.AlreadyComplete>` if
        already complete
    """
    if self._done.is_set():
        raise errors.AlreadyComplete()
    self._value = value
    # Schedule value callbacks on the backend rather than running inline.
    for callback in self._cbacks:
        backend.schedule(callback, args=(value,))
    self._cbacks = None
    # Wake anything blocked in a wait on this future.
    for waiter in list(self._waits):
        waiter.finish(self)
    self._waits = None
    # Propagate to dependent child futures; children are held as weakrefs,
    # so skip any that have already been collected.
    for ref in self._children:
        child = ref()
        if child is not None:
            child._incoming(self, value)
    self._children = None
    self._done.set()
|
def row_csv_limiter(rows, limits=None):
    """Slice *rows* between given limits, detecting missing ones.

    ``limits`` may supply [upper, lower]; any limit not provided is
    detected with ``row_iter_limiter`` on a best-effort basis.
    """
    limits = [None, None] if limits is None else limits
    given = len(exclude_empty_values(limits))
    if given == 2:
        upper, lower = limits[0], limits[1]
    elif given == 1:
        upper = limits[0]
        lower = row_iter_limiter(rows, 1, -1, 1)
    else:
        upper = row_iter_limiter(rows, 0, 1, 0)
        lower = row_iter_limiter(rows, 1, -1, 1)
    return rows[upper:lower]
|
def project(self, projection_matrix, inplace=True, log=None, enforce_bounds="reset"):
    """Project the ensemble using the null-space Monte Carlo method.

    Parameters
    ----------
    projection_matrix : pyemu.Matrix
        projection operator - must already respect log transform
    inplace : bool
        if True, project self; otherwise return a new ParameterEnsemble
    log : pyemu.Logger
        optional logger for progress messages
    enforce_bounds : str
        parameter bound enforcement flag: 'drop' removes offending
        realizations, 'reset' resets offending values

    Returns
    -------
    ParameterEnsemble
        only when ``inplace`` is False
    """
    if self.istransformed:
        self._back_transform()
    # Work in log10 space for log-transformed parameters.
    istransformed = self.pst.parameter_data.loc[:, "partrans"] == "log"
    self.loc[:, istransformed] = self.loc[:, istransformed].applymap(lambda x: math.log10(x))
    self.__istransformed = True
    # make sure everything is cool WRT ordering
    common_names = get_common_elements(self.adj_names, projection_matrix.row_names)
    base = self.mean_values.loc[common_names]
    projection_matrix = projection_matrix.get(common_names, common_names)
    if not inplace:
        new_en = ParameterEnsemble(pst=self.pst.get(), data=self.loc[:, :].copy(), columns=self.columns, mean_values=self.mean_values.copy(), istransformed=self.istransformed)
    for real in self.index:
        if log is not None:
            log("projecting realization {0}".format(real))
        # Null-space projection of the deviation from the mean vector.
        # (BUG FIX: the original first computed pdiff as the raw difference
        # and then immediately overwrote it — the dead assignment is removed.)
        pdiff = np.dot(projection_matrix.x, (self.loc[real, common_names] - base).values)
        if inplace:
            self.loc[real, common_names] = base + pdiff
        else:
            new_en.loc[real, common_names] = base + pdiff
        if log is not None:
            log("projecting realization {0}".format(real))
    if not inplace:
        new_en.enforce(enforce_bounds)
        # Back-transform log parameters to native space.
        new_en.loc[:, istransformed] = 10.0 ** new_en.loc[:, istransformed]
        new_en.__istransformed = False
        # new_en._back_transform()
        return new_en
    self.enforce(enforce_bounds)
    self.loc[:, istransformed] = 10.0 ** self.loc[:, istransformed]
    self.__istransformed = False
|
def __Login(host, port, user, pwd, service, adapter, version, path, keyFile, certFile, thumbprint, sslContext, connectionPoolTimeout=CONNECTION_POOL_IDLE_TIMEOUT_SEC):
    """Private method that performs the actual Connect and returns a
    connected service instance object.

    @param host: Which host to connect to (string).
    @param port: Port (int).
    @param user: User (string).
    @param pwd: Password (string).
    @param service: Service (string).
    @param adapter: Adapter (string).
    @param version: Version (string).
    @param path: Path (string).
    @param keyFile: ssl key file path (string).
    @param certFile: ssl cert file path (string).
    @param thumbprint: host cert thumbprint (string).
    @param sslContext: SSL.Context describing the various SSL options. It is
       only supported in Python 2.7.9 or higher.
    @param connectionPoolTimeout: Timeout in secs for idle connections to
       close; specify negative numbers for never closing the connections (int).
    """
    content, si, stub = __RetrieveContent(host, port, adapter, version, path, keyFile, certFile, thumbprint, sslContext, connectionPoolTimeout)
    # Get a ticket if we're connecting to localhost and password is not specified
    if host == 'localhost' and not pwd:
        try:
            (user, pwd) = GetLocalTicket(si, user)
        except Exception:
            # Local-ticket login is not supported against vCenter, and
            # connecting with an empty password is fine in debug builds.
            pass
    # Login. Any failure (e.g. vim.fault.InvalidLogin) simply propagates to
    # the caller — the original wrapped this in a try/except that only
    # re-raised, which was dead code and has been removed, along with the
    # unused binding of the session object.
    content.sessionManager.Login(user, pwd, None)
    return si, stub
|
def groupby(self, dimensions=None, container_type=None, group_type=None, **kwargs):
    """Groups DynamicMap by one or more dimensions.

    Applies groupby operation over the specified dimensions
    returning an object of type container_type (expected to be
    dictionary-like) containing the groups.

    Args:
        dimensions: Dimension(s) to group by
        container_type: Type to cast group container to
        group_type: Type to cast each group to
        dynamic: Whether to return a DynamicMap
        **kwargs: Keyword arguments to pass to each group

    Returns:
        Returns object of supplied container_type containing the
        groups. If dynamic=True returns a DynamicMap instead.
    """
    if dimensions is None:
        dimensions = self.kdims
    if not isinstance(dimensions, (list, tuple)):
        dimensions = [dimensions]
    # Default both the container and the group types to this map's own type.
    container_type = container_type if container_type else type(self)
    group_type = group_type if group_type else type(self)
    # Outer dims are grouped over; the remaining (inner) dims stay inside
    # each group.
    outer_kdims = [self.get_dimension(d) for d in dimensions]
    inner_kdims = [d for d in self.kdims if not d in outer_kdims]
    # Dynamic container/group types can defer evaluation via callbacks;
    # static ones need explicit dimension values to sample right now.
    outer_dynamic = issubclass(container_type, DynamicMap)
    inner_dynamic = issubclass(group_type, DynamicMap)
    if ((not outer_dynamic and any(not d.values for d in outer_kdims)) or
            (not inner_dynamic and any(not d.values for d in inner_kdims))):
        raise Exception('Dimensions must specify sampling via '
                        'values to apply a groupby')
    if outer_dynamic:
        def outer_fn(*outer_key, **dynkwargs):
            if inner_dynamic:
                # Fully lazy: each group is a DynamicMap selecting on both
                # the fixed outer key and the requested inner key.
                def inner_fn(*inner_key, **dynkwargs):
                    outer_vals = zip(outer_kdims, util.wrap_tuple(outer_key))
                    inner_vals = zip(inner_kdims, util.wrap_tuple(inner_key))
                    inner_sel = [(k.name, v) for k, v in inner_vals]
                    outer_sel = [(k.name, v) for k, v in outer_vals]
                    return self.select(**dict(inner_sel + outer_sel))
                return self.clone([], callback=inner_fn, kdims=inner_kdims)
            else:
                # Static groups: sample all inner values for this outer key.
                dim_vals = [(d.name, d.values) for d in inner_kdims]
                dim_vals += [(d.name, [v]) for d, v in zip(outer_kdims, util.wrap_tuple(outer_key))]
                with item_check(False):
                    selected = HoloMap(self.select(**dict(dim_vals)))
                    return group_type(selected.reindex(inner_kdims))
        if outer_kdims:
            return self.clone([], callback=outer_fn, kdims=outer_kdims)
        else:
            return outer_fn(())
    else:
        # Static container: enumerate every combination of outer values.
        outer_product = itertools.product(*[self.get_dimension(d).values for d in dimensions])
        groups = []
        for outer in outer_product:
            outer_vals = [(d.name, [o]) for d, o in zip(outer_kdims, outer)]
            if inner_dynamic or not inner_kdims:
                def inner_fn(outer_vals, *key, **dynkwargs):
                    inner_dims = zip(inner_kdims, util.wrap_tuple(key))
                    inner_vals = [(d.name, k) for d, k in inner_dims]
                    return self.select(**dict(outer_vals + inner_vals)).last
                if inner_kdims or self.streams:
                    # outer_vals is bound via partial to avoid the classic
                    # late-binding closure bug inside this loop.
                    group = self.clone(callback=partial(inner_fn, outer_vals), kdims=inner_kdims)
                else:
                    group = inner_fn(outer_vals, ())
            else:
                inner_vals = [(d.name, self.get_dimension(d).values) for d in inner_kdims]
                with item_check(False):
                    selected = HoloMap(self.select(**dict(outer_vals + inner_vals)))
                    group = group_type(selected.reindex(inner_kdims))
            groups.append((outer, group))
        return container_type(groups, kdims=outer_kdims)
|
def distance_matrix(stream_list, allow_shift=False, shift_len=0, cores=1):
    """Compute distance matrix for waveforms based on cross-correlations.

    Function to compute the distance matrix for all templates - will give
    distance as 1-abs(cccoh), e.g. a well correlated pair of templates will
    have small distances, and an equally well correlated reverse image will
    have the same distance as a positively correlated image - this is an issue.

    :type stream_list: list
    :param stream_list:
        List of the :class:`obspy.core.stream.Stream` to compute the distance
        matrix for
    :type allow_shift: bool
    :param allow_shift: To allow templates to shift or not?
    :type shift_len: float
    :param shift_len: How many seconds for templates to shift
    :type cores: int
    :param cores: Number of cores to parallel process using, defaults to 1.

    :returns: distance matrix
    :rtype: :class:`numpy.ndarray`

    .. warning::
        Because distance is given as :math:`1-abs(coherence)`, negatively
        correlated and positively correlated objects are given the same
        distance.
    """
    # Initialize square matrix
    dist_mat = np.array([np.array([0.0] * len(stream_list))] * len(stream_list))
    for i, master in enumerate(stream_list):
        # Start a parallel processing pool.
        # NOTE(review): a fresh pool is created and torn down for every row;
        # hoisting it out of the loop would avoid repeated start-up cost.
        pool = Pool(processes=cores)
        # Cross-correlate this master against every stream; each job carries
        # its index j so results can be re-ordered after async completion.
        results = [pool.apply_async(cross_chan_coherence, args=(master, stream_list[j], allow_shift, shift_len, j)) for j in range(len(stream_list))]
        pool.close()
        # Extract the results when they are done
        dist_list = [p.get() for p in results]
        # Close and join all the processes back to the master process
        pool.join()
        # Sort the results by the input index j (results are presumably
        # (coherence, j) tuples — confirm against cross_chan_coherence).
        dist_list.sort(key=lambda tup: tup[1])
        # Fill the upper triangle of the matrix: distance = 1 - coherence.
        for j in range(i, len(stream_list)):
            if i == j:
                dist_mat[i, j] = 0.0
            else:
                dist_mat[i, j] = 1 - dist_list[j][0]
    # Mirror the upper triangle into the lower triangle (symmetric matrix).
    for i in range(1, len(stream_list)):
        for j in range(i):
            dist_mat[i, j] = dist_mat.T[i, j]
    return dist_mat
|
def _get_sync(self, url):
    """Perform a synchronous GET request and return the decoded JSON body.

    Args:
        url (str): URL to fetch
    Returns:
        Parsed JSON payload of the individual URL request's response.
    Raises:
        HTTPError: If the HTTP request failed (non-200 status).
    """
    response = self.session.get(url)
    if response.status_code != requests.codes.ok:
        raise HTTPError
    return response.json()
|
def _refresh_state ( self ) :
"""Refresh the job info ."""
|
self . _info = self . _api . projects ( ) . jobs ( ) . get ( name = self . _name ) . execute ( )
self . _fatal_error = self . _info . get ( 'errorMessage' , None )
state = str ( self . _info . get ( 'state' ) )
self . _is_complete = ( state == 'SUCCEEDED' or state == 'FAILED' )
|
def parameter_from_numpy(self, name, array):
    """Create a parameter whose value is initialized from a numpy tensor.

    Parameters
    ----------
    name : str
        parameter name
    array : np.ndarray
        initial value

    Returns
    -------
    mxnet.gluon.parameter
        the created/retrieved parameter object
    """
    return self.params.get(name, shape=array.shape, init=mx.init.Constant(array))
|
def _make_suffix ( cov ) :
"""Create a suffix for nbval data file depending on pytest - cov config ."""
|
# Check if coverage object has data _ suffix :
if cov and cov . data_suffix is not None : # If True , the suffix will be autogenerated by coverage . py .
# The suffixed data files will be automatically combined later .
if cov . data_suffix is True :
return True
# Has a suffix , but we add our own extension
return cov . data_suffix + '.nbval'
return 'nbval'
|
def compile_with_coverage_instrumentation(self,  # type: ignore
                                          manager_container, container: 'Container',  # type: ignore
                                          verbose: bool = False) -> CompilationOutcome:
    """See `Compiler.compile_with_coverage_instrumentation`.

    Uses the instrumented build command when one is configured, falling
    back to the plain build command otherwise.
    """
    cmd = self.__command_with_instrumentation or self.__command
    return self.__compile(manager_container, container, cmd, verbose)
|
def match_prefix(prefix, line):
    """Return the index in *line* just past the matched *prefix*.

    The regex match is performed against the tab-expanded (tabstop 4)
    line and the result is translated back into an index into the raw
    line. Returns -1 when the prefix does not match, and 0 for an empty
    match.
    """
    expanded = line.expandtabs(4)
    m = re.match(prefix, expanded)
    if not m:
        # A prefix of trailing spaces may still "match" a blank line:
        # pad heavily before each newline and retry.
        padded = expanded.replace('\n', ' ' * 99 + '\n')
        if re.match(prefix, padded):
            return len(line) - 1
        return -1
    pos = m.end()
    if pos == 0:
        return 0
    # Find the shortest raw-line prefix whose tab expansion covers pos.
    for i in range(1, len(line) + 1):
        if len(line[:i].expandtabs(4)) >= pos:
            return i
|
def keep_only_sticked_and_selected_tabs(self):
    """Close all tabs, except the currently active one and all sticky ones."""
    # Respect the user preference that disables this behaviour.
    if not global_gui_config.get_config_value('KEEP_ONLY_STICKY_STATES_OPEN', True):
        return
    page_id = self.view.notebook.get_current_page()
    if page_id == -1:
        # No tabs are open at all.
        return
    current_page = self.view.notebook.get_nth_page(page_id)
    current_identifier = self.get_state_identifier_for_page(current_page)
    # Collect first, close afterwards, so we never mutate self.tabs while
    # iterating over it. Keep the active tab and every sticky tab open.
    to_close = [identifier
                for identifier, tab_info in list(self.tabs.items())
                if identifier != current_identifier and not tab_info['is_sticky']]
    for identifier in to_close:
        self.close_page(identifier, delete=False)
|
def on_key_release_repeat(self, *dummy):
    """Debounce key auto-repeat: defer the release handler until idle.

    Holding a key down fires many press/release pairs in succession;
    scheduling the release callback via ``after_idle`` lets a following
    press squash the spurious release.
    """
    handle = self.after_idle(self.on_key_release, dummy)
    self.has_prev_key_release = handle
|
def get_base_url(url, include_path=False):
    """:return: the url without the query or fragment segments, ending in '/'"""
    if not url:
        return None
    parts = _urlsplit(url)
    path = parts.path if include_path else ''
    base_url = _urlunsplit((parts.scheme, parts.netloc, path, None, None))
    if base_url.endswith('/'):
        return base_url
    return base_url + '/'
|
def map_query_string(self):
    """Rename GET query-string keys according to ``query_key_mapper``.

    Returns a dict of (possibly renamed) keys mapped to their stripped
    values, or an empty dict when there is nothing to map or the request
    is a POST.
    """
    mapper = self.query_key_mapper
    if not mapper or self.request.method == 'POST':
        # Nothing to map, don't do anything.
        return {}
    mapped = {}
    for key, value in self.request.GET.items():
        mapped[mapper.get(key) if key in mapper else key] = value.strip()
    return mapped
|
def ok(self):
    """Commit the current color selection and close the dialog."""
    rgb, hsv, hexa = self.square.get()
    if self.alpha_channel:
        # Alpha-aware dialogs report the hex value from the alpha widget
        # and append the alpha component to the RGB tuple.
        hexa = self.hexa.get()
        rgb = rgb + (self.alpha.get(),)
    self.color = (rgb, hsv, hexa)
    self.destroy()
|
def boundaries(self, boundaryEdges=True, featureAngle=65, nonManifoldEdges=True):
    """Return an ``Actor`` that shows the boundary lines of this mesh.

    :param bool boundaryEdges: turn on/off the extraction of boundary edges.
    :param float featureAngle: feature angle for extracting feature edges;
        a falsy value disables feature-edge extraction entirely.
    :param bool nonManifoldEdges: turn on/off extraction of non-manifold edges.
    """
    edges = vtk.vtkFeatureEdges()
    edges.SetInputData(self.polydata())
    edges.SetBoundaryEdges(boundaryEdges)
    if featureAngle:
        edges.FeatureEdgesOn()
        edges.SetFeatureAngle(featureAngle)
    else:
        edges.FeatureEdgesOff()
    edges.SetNonManifoldEdges(nonManifoldEdges)
    edges.ColoringOff()
    edges.Update()
    return Actor(edges.GetOutput(), c="p").lw(5)
|
def _combine_clique_scores ( self , rscore , hbar , vbar ) :
"""Computes the score of a partial native clique embedding given the score
attained by the already - placed ells , together with the ell block
defined by ` ` hbar = ( y0 , xmin , xmax ) ` ` , and ` ` vbar = ( x0 , ymin , ymax ) ` ` .
In the plain : class : ` eden _ processor ` class , this is simply the number of ells
contained in the partial native clique after adding the new ells ."""
|
( y0 , xmin , xmax ) = hbar
( x0 , ymin , ymax ) = vbar
if rscore is None :
rscore = 0
hscore = self . hline_score ( y0 , xmin , xmax )
vscore = self . vline_score ( x0 , ymin , ymax )
if vscore < hscore :
score = rscore + vscore
else :
score = rscore + hscore
return score
|
def write_to_socket(self, frame_data):
    """Write a frame to the socket, retrying until all bytes are sent.

    Serialized by ``self._wr_lock`` so concurrent writers cannot interleave
    partial frames. Timeouts and EWOULDBLOCK/EAGAIN are retried; any other
    socket error is recorded in ``self._exceptions`` as an
    AMQPConnectionError and aborts the write.

    :param str frame_data: raw frame bytes to send
    :return: None
    """
    self._wr_lock.acquire()
    try:
        total_bytes_written = 0
        bytes_to_send = len(frame_data)
        # send() may transmit only part of the buffer; loop over the
        # remaining tail until everything is on the wire.
        while total_bytes_written < bytes_to_send:
            try:
                if not self.socket:
                    raise socket.error('connection/socket error')
                bytes_written = (self.socket.send(frame_data[total_bytes_written:]))
                if bytes_written == 0:
                    # A zero-byte send means the peer closed the connection.
                    raise socket.error('connection/socket error')
                total_bytes_written += bytes_written
            except socket.timeout:
                # Transient: retry the send with the remaining bytes.
                pass
            except socket.error as why:
                if why.args[0] in (EWOULDBLOCK, EAGAIN):
                    # Socket not ready yet; retry the send.
                    continue
                self._exceptions.append(AMQPConnectionError(why))
                return
    finally:
        self._wr_lock.release()
|
def get_image_format(filename):
    """Identify the image (or comic-archive) format of *filename*.

    Returns the PIL format name, a "sequenced" variant for animated
    images, a comic-archive format, or an error marker when the file is
    not a recognizable image.
    """
    image = None
    bad_image = 1
    image_format = NONE_FORMAT
    sequenced = False
    try:
        # verify() returns None for a good image; the file must be reopened
        # afterwards because verify() leaves the image object unusable.
        bad_image = Image.open(filename).verify()
        image = Image.open(filename)
        image_format = image.format
        sequenced = _is_image_sequenced(image)
    except (OSError, IOError, AttributeError):
        # Not an image PIL can read; fall through with the error defaults.
        pass
    if sequenced:
        image_format = gif.SEQUENCED_TEMPLATE.format(image_format)
    elif image is None or bad_image or image_format == NONE_FORMAT:
        image_format = ERROR_FORMAT
    # Comic archives (e.g. CBZ/CBR) take precedence over the image result.
    comic_format = comic.get_comic_format(filename)
    if comic_format:
        image_format = comic_format
    if (Settings.verbose > 1) and image_format == ERROR_FORMAT and (not Settings.list_only):
        print(filename, "doesn't look like an image or comic archive.")
    return image_format
|
def meantsubpool(d, data_read):
    """Wrapper for mean visibility subtraction in time.

    Note: doesn't work when called from the pipeline using a
    multiprocessing pool (the shared-memory globals are not inherited).
    """
    logger.info('Subtracting mean visibility in time...')
    # Re-wrap the module-level shared-memory buffer as a numpy array; the
    # data_read argument is ignored in favor of the shared buffer.
    data_read = numpyview(data_read_mem, 'complex64', datashape(d))
    tsubpart = partial(rtlib.meantsub, data_read)
    # Split the baselines into one contiguous range per thread.
    blranges = [(d['nbl'] * t / d['nthread'], d['nbl'] * (t + 1) / d['nthread']) for t in range(d['nthread'])]
    with closing(mp.Pool(1, initializer=initreadonly, initargs=(data_read_mem,))) as tsubpool:
        # BUG FIX: the original mapped over an undefined name ``blr``,
        # which raised NameError; it clearly meant ``blranges``.
        tsubpool.map(tsubpart, blranges)
|
def input_dim(self):
    """Total input dimensionality of the domain (continuous + discrete)."""
    continuous = self.get_continuous_dims()
    discrete = self.get_discrete_dims()
    return len(continuous) + len(discrete)
|
def get_runtime_value(self, ihcid: int):
    """Get a runtime value, re-authenticating once if the first read fails.

    Returns ``True`` immediately when the first read succeeds; otherwise
    re-authenticates and returns whatever the retry yields.
    """
    first_attempt = self.client.get_runtime_value(ihcid)
    if first_attempt:
        return True
    # The session may have expired — re-authenticate and retry once.
    self.re_authenticate()
    return self.client.get_runtime_value(ihcid)
|
def _resolve_deps(self, formula_def):
    '''Return a list of packages which need to be installed, to resolve all
    dependencies

    Recursively walks the comma-separated ``dependencies`` of the given
    formula. Returns a 4-tuple ``(can_has, cant_has, optional, recommended)``:
    resolvable dependency names mapped to their repo, unresolvable
    dependency names, and the accumulated optional/recommended names.
    '''
    pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
    if not isinstance(pkg_info, dict):
        pkg_info = {}
    can_has = {}
    cant_has = []
    # Normalize a null dependencies field to an empty string.
    if 'dependencies' in formula_def and formula_def['dependencies'] is None:
        formula_def['dependencies'] = ''
    for dep in formula_def.get('dependencies', '').split(','):
        dep = dep.strip()
        if not dep:
            continue
        # Already installed: nothing to resolve for this dependency.
        if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
            continue
        if dep in self.avail_pkgs:
            can_has[dep] = self.avail_pkgs[dep]
        else:
            cant_has.append(dep)
    optional = formula_def.get('optional', '').split(',')
    recommended = formula_def.get('recommended', '').split(',')
    inspected = []
    to_inspect = can_has.copy()
    while to_inspect:
        dep = next(six.iterkeys(to_inspect))
        del to_inspect[dep]
        # Don't try to resolve the same package more than once
        if dep in inspected:
            continue
        inspected.append(dep)
        # Recurse into this dependency's own formula, looked up in the
        # repo that provides it.
        repo_contents = self.repo_metadata.get(can_has[dep], {})
        repo_packages = repo_contents.get('packages', {})
        dep_formula = repo_packages.get(dep, {}).get('info', {})
        also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
        can_has.update(also_can)
        cant_has = sorted(set(cant_has + also_cant))
        optional = sorted(set(optional + opt_dep))
        recommended = sorted(set(recommended + rec_dep))
    return can_has, cant_has, optional, recommended
|
def Delete(self, n=1, dl=0):
    """Press the delete key *n* times, after an optional delay *dl*."""
    self.Delay(dl)
    key = self.keyboard.delete_key
    self.keyboard.tap_key(key, n)
|
def qteGetAppletHandle(self, appletID: str):
    """Return a handle to ``appletID``, or **None** if no such applet exists.

    |Args|

    * ``appletID`` (**str**): ID of applet.

    |Returns|

    * **QtmacsApplet**: handle to applet with ID ``appletID``.

    |Raises|

    * **QtmacsArgumentError** if at least one argument has an invalid type.
    """
    # Return the first registered applet whose ID matches.
    for applet in self._qteAppletList:
        if applet.qteAppletID() == appletID:
            return applet
    return None
|
def unlink(link):
    """os.unlink() but handle junction points on old Windows Pythons."""
    needs_rmdir = (
        islink(link)
        and platform.system() == "Windows"
        and sys.version_info[:2] < (3, 5)
    )
    if needs_rmdir:
        # Deleting junction points was added to os.unlink only in 3.5
        # (https://bugs.python.org/issue18314); fall back to rmdir.
        subprocess.check_call(["rmdir", link], shell=True)
    else:
        os.unlink(link)
|
def delete(name, remove=False, force=False):
    '''Remove a user from the minion

    CLI Example:

    .. code-block:: bash

        salt '*' user.delete name remove=True force=True
    '''
    if salt.utils.stringutils.contains_whitespace(name):
        raise SaltInvocationError('Username cannot contain whitespace')
    # Nothing to do if the user does not exist.
    if not info(name):
        return True
    # force is added for compatibility with user.absent state function
    if force:
        log.warning('force option is unsupported on MacOS, ignoring')
    # remove home directory from filesystem
    if remove:
        __salt__['file.remove'](info(name)['home'])
    # Remove from any groups other than primary group. Needs to be done since
    # group membership is managed separately from users and an entry for the
    # user will persist even after the user is removed.
    chgroups(name, ())
    return _dscl(['/Users/{0}'.format(name)], ctype='delete')['retcode'] == 0
|
def get_host_finger(protocol, ip, port, timeout=5):
    """Grab the service banner (fingerprint) from a port on a remote host.

    :param protocol: protocol, 'tcp' or 'udp'
    :param ip: target IP address
    :param port: target port
    :param timeout: socket timeout in seconds
    :return: raw bytes received from the server
    """
    probe = b"Hello, Server\r\n"
    if protocol == 'tcp':
        # TCP: connect, then send the probe over the stream.
        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client.settimeout(timeout)
        client.connect((ip, port))
        client.send(probe)
    elif protocol == 'udp':
        # UDP: connectionless, send the probe as a single datagram.
        client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        client.settimeout(timeout)
        client.sendto(probe, (ip, port))
    else:
        raise Exception('协议不支持')
    banner = client.recv(1024)
    client.close()
    return banner
|
def clear_lock(self, abspath=True):
    """Remove any conda lock files on the system (``conda clean --lock``)."""
    return self._call_and_parse(['clean', '--lock', '--json'], abspath=abspath)
|
def prior_predictive_to_xarray(self):
    """Convert prior_predictive samples to an xarray dataset."""
    draws = get_draws_stan3(self.prior, model=self.prior_model, variables=self.prior_predictive)
    return dict_to_dataset(draws, library=self.stan, coords=self.coords, dims=self.dims)
|
def generate_init(self, dst, out_format, vms_to_include, filters=None):
    """Generate an init file which represents this env and can
    be used with the images created by self.export_vms

    Args:
        dst (str): path and name of the new init file
        out_format (plugins.output.OutFormatPlugin):
            formatter for the output (the default is yaml)
        filters (list): list of paths to keys that should be removed from
            the init file
        vms_to_include (list of :class:`lago.plugins.vm.VMPlugin`):
            list of vms to include in the init file

    Returns:
        None
    """
    # todo: move this logic to PrefixExportManager
    with LogTask('Exporting init file to: {}'.format(dst)):
        # Set the default formatter to yaml. The default formatter
        # doesn't generate a valid init file, so it's not reasonable
        # to use it
        if isinstance(out_format, plugins.output.DefaultOutFormatPlugin):
            out_format = plugins.output.YAMLOutFormatPlugin()
        if not filters:
            filters = [
                'domains/*/disks/*/metadata',
                'domains/*/metadata/deploy-scripts',
                'domains/*/snapshots',
                'domains/*/name',
                'nets/*/mapping',
                'nets/*/dns_records',
            ]
        spec = self.get_env_spec(filters)
        # Keep only the domains that belong to the requested VMs.
        included_domains = {}
        for vm in vms_to_include:
            included_domains[vm.name()] = spec['domains'][vm.name()]
        spec['domains'] = included_domains
        # BUGFIX: dict.viewitems() exists only on Python 2; items() works
        # on both Python 2 and 3.
        for _, domain in spec['domains'].items():
            domain['disks'] = [
                d for d in domain['disks'] if not d.get('skip-export')
            ]
            for disk in domain['disks']:
                if disk['type'] == 'template':
                    disk['template_type'] = 'qcow2'
                elif disk['type'] == 'empty':
                    disk['type'] = 'file'
                    disk['make_a_copy'] = 'True'
                # Insert the relative path to the exported images
                disk['path'] = os.path.join(
                    '$LAGO_INITFILE_PATH', os.path.basename(disk['path'])
                )
        with open(dst, 'wt') as f:
            if isinstance(out_format, plugins.output.YAMLOutFormatPlugin):
                # Dump the yaml file without type tags
                # TODO: Allow passing parameters to output plugins
                f.write(yaml.safe_dump(spec))
            else:
                f.write(out_format.format(spec))
|
def dump_dataflow_images(df, index=0, batched=True, number=1000, output_dir=None,
                         scale=1, resize=None, viz=None, flipRGB=False):
    """Dump or visualize images of a :class:`DataFlow`.

    Args:
        df (DataFlow): the DataFlow.
        index (int): the index of the image component.
        batched (bool): whether the component contains batched images (NHW or
            NHWC) or not (HW or HWC).
        number (int): how many datapoint to take from the DataFlow.
        output_dir (str): output directory to save images, default to not save.
        scale (float): scale the value, usually either 1 or 255.
        resize (tuple or None): tuple of (h, w) to resize the images to.
        viz (tuple or None): tuple of (h, w) determining the grid size to use
            with :func:`gen_stack_patches` for visualization. No visualization
            will happen by default.
        flipRGB (bool): apply a RGB<->BGR conversion or not.
    """
    if output_dir:
        mkdir_p(output_dir)
    if viz is not None:
        # viz is a (rows, cols) grid; a stacked visualization is flushed
        # every rows*cols collected images
        viz = shape2d(viz)
        vizsize = viz[0] * viz[1]
    if resize is not None:
        resize = tuple(shape2d(resize))
    vizlist = []
    df.reset_state()
    cnt = 0
    # Iterate the DataFlow repeatedly until `number` images are consumed.
    # NOTE(review): a DataFlow that yields nothing makes this loop forever.
    while True:
        for dp in df:
            if not batched:
                imgbatch = [dp[index]]
            else:
                imgbatch = dp[index]
            for img in imgbatch:
                cnt += 1
                if cnt == number:
                    return
                if scale != 1:
                    img = img * scale
                if resize is not None:
                    img = cv2.resize(img, resize)
                if flipRGB:
                    # reverse the channel axis (RGB <-> BGR)
                    img = img[:, :, ::-1]
                if output_dir:
                    fname = os.path.join(output_dir, '{:03d}.jpg'.format(cnt))
                    cv2.imwrite(fname, img)
                if viz is not None:
                    vizlist.append(img)
                if viz is not None and len(vizlist) >= vizsize:
                    # show one full grid, keep leftover images for the next one
                    stack_patches(vizlist[:vizsize], nr_row=viz[0], nr_col=viz[1], viz=True)
                    vizlist = vizlist[vizsize:]
|
def alignLandmarks(source, target, rigid=False):
    """Find best matching of source points towards target
    in the mean least square sense, in one single step.

    :param source: actor whose polydata points are transformed
    :param target: actor providing the reference landmark points
    :param bool rigid: if True, restrict the transform to a rigid body
        (no scaling)
    :return: a new ``Actor`` built from the transformed source polydata;
        the computed ``vtkLandmarkTransform`` is stored in
        ``actor.info["transform"]``
    """
    lmt = vtk.vtkLandmarkTransform()
    ss = source.polydata().GetPoints()
    st = target.polydata().GetPoints()
    # Landmark matching requires a one-to-one point correspondence.
    if source.N() != target.N():
        vc.printc('~times Error in alignLandmarks(): Source and Target with != nr of points!', source.N(), target.N(), c=1)
        # NOTE(review): exit() aborts the whole interpreter here; consider
        # raising an exception instead so callers can recover.
        exit()
    lmt.SetSourceLandmarks(ss)
    lmt.SetTargetLandmarks(st)
    if rigid:
        lmt.SetModeToRigidBody()
    lmt.Update()
    # Apply the computed transform to the source polydata.
    tf = vtk.vtkTransformPolyDataFilter()
    tf.SetInputData(source.polydata())
    tf.SetTransform(lmt)
    tf.Update()
    actor = Actor(tf.GetOutput())
    actor.info["transform"] = lmt
    # Carry over the visual properties of the source actor.
    pr = vtk.vtkProperty()
    pr.DeepCopy(source.GetProperty())
    actor.SetProperty(pr)
    return actor
|
def read_local_config(cfg):
    """Parse a local config file for override values.

    Args:
        cfg (str): filename of the local config file

    Returns:
        dict of values contained in the local config file; an empty dict
        when the file is missing or cannot be read
    """
    try:
        if not os.path.exists(cfg):
            # warn with the calling function's name for easier tracing
            logger.warning('%s: local config file (%s) not found, cannot be read' % (inspect.stack()[0][3], str(cfg)))
        else:
            return import_file_object(cfg)
    except IOError as e:
        logger.warning('import_file_object: %s error opening %s' % (str(e), str(cfg)))
    return {}
|
def addPoint(self, x, y):
    """Add a new chart point to this item.

    :param x: <variant>
    :param y: <variant>
    """
    new_point = (x, y)
    self._points.append(new_point)
    # flag the item so it gets rebuilt on the next refresh
    self._dirty = True
|
def tags_getListUserPopular(user_id='', count=''):
    """Gets the popular tags for a user in dictionary form tag => count"""
    method = 'flickr.tags.getListUserPopular'
    # an empty user_id means "the authenticated user", so auth is required
    auth = user_id == ''
    data = _doget(method, auth=auth, user_id=user_id)
    tags = data.rsp.tags.tag
    if not isinstance(tags, list):
        # a single tag comes back as a bare object, not a one-element list
        tags = [tags]
    return {tag.text: tag.count for tag in tags}
|
def trigger(self, only_manual=True):
    """Trigger a quick-action automation."""
    # by default only manual (quick-action) automations may be triggered
    if only_manual and not self.is_quick_action:
        raise AbodeException((ERROR.TRIGGER_NON_QUICKACTION))
    target_url = CONST.AUTOMATION_APPLY_URL.replace(
        '$AUTOMATIONID$', self.automation_id)
    self._abode.send_request(method="put", url=target_url, data=self._automation)
    return True
|
def retrieve_list(self, session, filters, *args, **kwargs):
    """Retrieves a list of the model for this manager.
    It is restricted by the filters provided.

    :param Session session: The SQLAlchemy session to use
    :param dict filters: The filters to restrict the returned
        models on
    :return: A tuple of the list of dictionary representation
        of the models and the dictionary of meta data
    :rtype: list, dict
    """
    query = self.queryset(session)
    # Coerce the pagination query args (which may arrive as strings)
    # into ints via a throwaway IntegerField translator.
    translator = IntegerField('tmp')
    pagination_count = translator.translate(
        filters.pop(self.pagination_count_query_arg, self.paginate_by))
    pagination_pk = translator.translate(
        filters.pop(self.pagination_pk_query_arg, 1))
    pagination_pk -= 1
    # logic works zero based. Pagination shouldn't be though
    # Whatever remains in filters is applied as column equality filters.
    query = query.filter_by(**filters)
    if pagination_pk:
        query = query.offset(pagination_pk * pagination_count)
    if pagination_count:
        # Fetch one extra row so we can tell whether a next page exists.
        query = query.limit(pagination_count + 1)
    count = query.count()
    next_link = None
    previous_link = None
    if count > pagination_count:
        # +2: back to one-based page numbering, plus one page forward.
        next_link = {self.pagination_pk_query_arg: pagination_pk + 2,
                     self.pagination_count_query_arg: pagination_count}
    if pagination_pk > 0:
        previous_link = {self.pagination_pk_query_arg: pagination_pk,
                         self.pagination_count_query_arg: pagination_count}
    field_dict = self.dot_field_list_to_dict(self.list_fields)
    # Serialize only the current page; the extra sentinel row is dropped.
    props = self.serialize_model(query[:pagination_count], field_dict=field_dict)
    meta = dict(links=dict(next=next_link, previous=previous_link))
    return props, meta
|
def get_jobs(job_queue='run_reach_queue', job_status='RUNNING'):
    """Returns a list of dicts with jobName and jobId for each job with the
    given status."""
    batch_client = boto3.client('batch')
    response = batch_client.list_jobs(jobQueue=job_queue, jobStatus=job_status)
    return response.get('jobSummaryList')
|
def fix_paths(project_data, rel_path, extensions):
    """Normalize the path values stored under the given extension keys.

    Each path is rewritten in place to
    ``os.path.normpath(os.path.join(rel_path, path))``.  Values may be a
    dict of lists, a list, or a single scalar path.

    Args:
        project_data (dict): mapping of extension keys to path value(s);
            mutated in place
        rel_path (str): base path prepended to every entry
        extensions (iterable): the keys of ``project_data`` to fix
    """
    def _norm(path):
        # join relative to rel_path and collapse redundant separators
        return os.path.normpath(os.path.join(rel_path, path))

    for key in extensions:
        value = project_data[key]
        # isinstance instead of exact `type(...) is` comparison: idiomatic
        # and also covers dict/list subclasses
        if isinstance(value, dict):
            for sub_key, paths in value.items():
                value[sub_key] = [_norm(p) for p in paths]
        elif isinstance(value, list):
            project_data[key] = [_norm(p) for p in value]
        else:
            project_data[key] = _norm(value)
|
def binary_value(self):
    """Converts this field to binary (assuming it's a binary string)"""
    hex_text = str(self.raw_value)
    # unhexlify needs an even number of hex digits; left-pad with a zero
    if len(hex_text) % 2 == 1:
        hex_text = '0' + hex_text
    return binascii.unhexlify(hex_text)
|
def to_curl(request, compressed=False, verify=True):
    """Return a string with a curl command reproducing the given request.

    Parameters
    ----------
    request : object
        Request-like object exposing ``method``, ``headers``, ``body``
        and ``url`` attributes.
    compressed : bool
        If `True` then `--compressed` argument will be added to result.
    verify : bool
        If `False` then `--insecure` argument will be added to result.
    """
    parts = [('curl', None), ('-X', request.method), ]
    for k, v in sorted(request.headers.items()):
        parts += [('-H', '{0}: {1}'.format(k, v))]
    if request.body:
        body = request.body
        if isinstance(body, bytes):
            body = body.decode('utf-8')
        parts += [('-d', body)]
    if compressed:
        parts += [('--compressed', None)]
    if not verify:
        parts += [('--insecure', None)]
    parts += [(None, request.url)]
    flat_parts = []
    for k, v in parts:
        if k:
            flat_parts.append(k)
        # BUGFIX: `if v:` silently dropped empty-string values, leaving a
        # dangling flag; test against None instead.
        if v is not None:
            # BUGFIX: escape embedded single quotes so the generated shell
            # command stays valid for values containing `'`.
            flat_parts.append("'{0}'".format(v.replace("'", "'\"'\"'")))
    return ' '.join(flat_parts)
|
def delete(cls, bucket, key):
    """Delete a tag."""
    # delete inside a nested transaction so the caller controls the commit
    with db.session.begin_nested():
        matching = cls.query.filter_by(
            bucket_id=as_bucket_id(bucket),
            key=key,
        )
        matching.delete()
|
def AddHeadwayPeriod(self, start_time, end_time, headway_secs,
                     problem_reporter=problems_module.default_problem_reporter):
    """Deprecated. Please use AddFrequency instead."""
    # Kept only for backwards compatibility: emit a DeprecationWarning and
    # delegate to the renamed implementation.
    warnings.warn("No longer supported. The HeadwayPeriod class was renamed to "
                  "Frequency, and all related functions were renamed "
                  "accordingly.", DeprecationWarning)
    self.AddFrequency(start_time, end_time, headway_secs, problem_reporter)
|
def handler(self):
    """The current imported serialization handler module.

    :return: The imported handler
    :rtype: module
    """
    try:
        return self._handler
    except AttributeError:
        # first access: resolve the module object and cache it on self
        self._handler = sys.modules[self.imported]
        return self._handler
|
def versions(self):
    """Return all version changes."""
    changes = []
    for version, _ in self.restarts:
        # collapse consecutive duplicates, keep every change of version
        if changes and changes[-1] == version:
            continue
        changes.append(version)
    return changes
|
def _ntp_dispatcher(payload):
    """Returns the right class for a given NTP packet."""
    # By default, calling NTP() will build a NTP packet as defined in RFC 5905
    # (see the code of NTPHeader). Use NTPHeader for extension fields and MAC.
    if payload is None:
        return NTPHeader
    if len(payload) >= _NTP_PACKET_MIN_SIZE:
        # the low three bits of the first byte carry the NTP mode
        mode = orb(payload[0]) & 7
        if mode == 6:
            return NTPControl
        if mode == 7:
            return NTPPrivate
        return NTPHeader
    return conf.raw_layer
|
def split_sentences(tokens):
    """Split sentences (based on tokenised data); yields each sentence as a list of tokens."""
    start = 0
    for position, _ in enumerate(tokens):
        if is_end_of_sentence(tokens, position):
            yield tokens[start:position + 1]
            start = position + 1
    # emit any trailing tokens that never hit an end-of-sentence marker
    if start <= len(tokens) - 1:
        yield tokens[start:]
|
def save_to_cache(url, response_json):
    """Save an HTTP response json object to the cache.

    If the request was sent to server via POST instead of GET, then URL should
    be a GET-style representation of request. Users should always pass
    OrderedDicts instead of dicts of parameters into request functions, so that
    the parameters stay in the same order each time, producing the same URL
    string, and thus the same hash. Otherwise the cache will eventually contain
    multiple saved responses for the same request because the URL's parameters
    appeared in a different order each time.

    Parameters
    ----------
    url : string
        the url of the request
    response_json : dict
        the json response

    Returns
    -------
    None
    """
    if settings.use_cache:
        if response_json is None:
            log('Saved nothing to cache because response_json is None')
        else:
            # create the folder on the disk if it doesn't already exist
            # NOTE(review): exists()+makedirs() is racy when several processes
            # populate the cache concurrently -- consider exist_ok=True
            if not os.path.exists(settings.cache_folder):
                os.makedirs(settings.cache_folder)
            # hash the url (to make filename shorter than the often extremely
            # long url)
            filename = hashlib.md5(url.encode('utf-8')).hexdigest()
            cache_path_filename = os.path.join(
                settings.cache_folder, os.extsep.join([filename, 'json']))
            # dump to json, and save to file
            json_str = make_str(json.dumps(response_json))
            with io.open(cache_path_filename, 'w', encoding='utf-8') as cache_file:
                cache_file.write(json_str)
            log('Saved response to cache file "{}"'.format(cache_path_filename))
|
def _get_by(key, val, l):
    """Out of list *l* return all elements that have *key=val*

    This comes in handy when you are working with aggregated/bucketed queries.
    """
    matches = lambda element: _check_value_recursively(key, val, element)
    return list(filter(matches, l))
|
def quasi_newton_uniform_blocks(points, cells, *args, **kwargs):
    """Lloyd's algorithm can be thought of a diagonal-only Hessian; this method
    incorporates the diagonal blocks, too.

    :param points: node coordinates of the input mesh
    :param cells: cell connectivity array of the input mesh
    :return: tuple ``(node_coords, cells["nodes"])`` of the smoothed mesh
    """
    def get_new_points(mesh):
        # Compute one update step on a copy of the coordinates.
        # TODO need copy?
        x = mesh.node_coords.copy()
        x += update(mesh)
        # update ghosts: re-derive the ghost coordinates from their mirrored
        # interior counterparts after the move
        x[ghosted_mesh.is_ghost_point] = ghosted_mesh.reflect_ghost(
            x[ghosted_mesh.mirrors])
        return x

    # Wrap the input mesh with ghost points before running the smoother.
    ghosted_mesh = GhostedMesh(points, cells)
    runner(
        get_new_points,
        ghosted_mesh,
        *args,
        **kwargs,
        update_topology=lambda mesh: ghosted_mesh.update_topology(),
        # get_stats_mesh=lambda mesh: ghosted_mesh.get_unghosted_mesh(),
    )
    # Strip the ghost points again before returning the result.
    mesh = ghosted_mesh.get_unghosted_mesh()
    return mesh.node_coords, mesh.cells["nodes"]
|
def importMzml(filepath, msrunContainer=None, siAttrFromSmi=None, specfilename=None):
    """Performs a complete import of a mzml file into a maspy MsrunContainer.

    :param filepath: path to the ".mzml" file to import
    :param msrunContainer: container to import into; a new
        ``maspy.core.MsrunContainer`` is created if None
    :param siAttrFromSmi: allow here to specify a custom function that
        extracts params from a spectrumMetadataItem
    :param specfilename: by default the filename will be used as the
        specfilename in the MsrunContainer and all mzML item instances,
        specify here an alternative specfilename to override the default one
    :return: the populated MsrunContainer, or None if *specfilename* is
        already present in the container
    :raises IOError: if *filepath* does not exist or is not a ".mzml" file
    """
    # TODO: docstring
    siAttrFromSmi = defaultFetchSiAttrFromSmi if siAttrFromSmi is None else siAttrFromSmi
    if msrunContainer is None:
        msrunContainer = maspy.core.MsrunContainer()
    basename = os.path.basename(filepath)
    dirname = os.path.dirname(filepath)
    filename, extension = os.path.splitext(basename)
    specfilename = filename if specfilename is None else specfilename
    # Check if the specified file is valid for an import
    if not os.path.isfile(filepath):
        raise IOError('File does not exist: %s' % filepath)
    elif extension.lower() != '.mzml':
        raise IOError('Filetype is not "mzml": %s' % filepath)
    elif specfilename in msrunContainer.info:
        print(specfilename, 'already present in the msrunContainer, aborting import.')
        return None
    mzmlReader = maspy.xml.MzmlReader(filepath)
    # Staging area for all parsed items before moving them into the container
    masterContainer = {'rm': str(), 'ci': {}, 'si': {}, 'sai': {}, 'smi': {}}
    # Dictionary recording which MS2 scans follow a MS1 scan
    ms1Record = ddict(list)
    for xmlSpectrum in mzmlReader.parseSpectra():
        smi, binaryDataArrayList = smiFromXmlSpectrum(xmlSpectrum, specfilename)
        # Generate SpectrumItem
        si = maspy.core.Si(smi.id, smi.specfile)
        si.isValid = True
        siAttrFromSmi(smi, si)
        if si.msLevel > 1:
            # strip the "scan=" prefix to keep only the numeric precursor id
            si.precursorId = si.precursorId.split('scan=')[1]
            # TODO: change to use regex to extract from known vendor format
            ms1Record[si.precursorId].append(si.id)
        else:
            ms1Record[si.id]
            # Touch the ddict to add the MS1 id, if it is not already present
        # Generate SpectrumArrayItem
        sai = maspy.core.Sai(smi.id, smi.specfile)
        sai.arrays, sai.arrayInfo = maspy.xml.extractBinaries(
            binaryDataArrayList, smi.attributes['defaultArrayLength'])
        # Store all items in the appropriate containers
        masterContainer['smi'][smi.id] = smi
        masterContainer['si'][smi.id] = si
        masterContainer['sai'][smi.id] = sai
    for siId, msnIdList in viewitems(ms1Record):
        # Ignore KeyError if the spectrum is not present in the mzML file
        # for whatever reason
        try:
            setattr(masterContainer['si'][siId], 'msnIdList', msnIdList)
        except KeyError:
            pass
    for xmlChromatogram in mzmlReader.chromatogramList:
        ci = ciFromXml(xmlChromatogram, specfilename)
        masterContainer['ci'][ci.id] = ci
    masterContainer['rm'] = mzmlReader.metadataNode
    # Register the specfile and move every staged group into the container,
    # marking each group's status flag as imported.
    msrunContainer._addSpecfile(specfilename, dirname)
    msrunContainer.rmc[specfilename] = masterContainer['rm']
    msrunContainer.info[specfilename]['status']['rm'] = True
    msrunContainer.smic[specfilename] = masterContainer['smi']
    msrunContainer.info[specfilename]['status']['smi'] = True
    msrunContainer.sic[specfilename] = masterContainer['si']
    msrunContainer.info[specfilename]['status']['si'] = True
    msrunContainer.saic[specfilename] = masterContainer['sai']
    msrunContainer.info[specfilename]['status']['sai'] = True
    msrunContainer.cic[specfilename] = masterContainer['ci']
    msrunContainer.info[specfilename]['status']['ci'] = True
    return msrunContainer
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.