signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def as_curl(self, endpoint, encoded_data, headers):
    """Render this send as an equivalent ``curl`` command string.

    Useful for debugging. When *encoded_data* is non-empty it is written
    out to a local file named ``encrypted.data`` so curl can post it.

    :param endpoint: Push service endpoint URL
    :type endpoint: basestring
    :param encoded_data: byte array of encoded data
    :type encoded_data: bytearray
    :param headers: Additional headers for the send
    :type headers: dict
    :returns string
    """
    header_lines = ['-H "{}: {}" \\ \n'.format(name.lower(), value)
                    for name, value in headers.items()]
    body_arg = ""
    if encoded_data:
        with open("encrypted.data", "wb") as out:
            out.write(encoded_data)
        body_arg = "--data-binary @encrypted.data"
        if 'content-length' not in headers:
            header_lines.append(
                '-H "content-length: {}" \\ \n'.format(len(encoded_data)))
    return "curl -vX POST {url} \\\n{headers}{data}".format(
        url=endpoint, headers="".join(header_lines), data=body_arg)
|
def populate(self, other):
    """Replace the current contents with those of *other*.

    Equivalent to ``update`` preceded by ``clear``; the change tracking
    is reset once the new contents are in place.
    """
    self.clear()
    self.update(other)
    self.reset_all_changes()
|
def schaffer(self, x):
    """Schaffer function, x0 in [-100..100]."""
    n = len(x)
    # Sum of squares of each pair of consecutive coordinates
    pair_sums = x[:n - 1] ** 2 + x[1:n] ** 2
    terms = pair_sums ** 0.25 * (np.sin(50 * pair_sums ** 0.1) ** 2 + 1)
    return sum(terms)
|
def create_account(self, balance=0, address=None, concrete_storage=False, dynamic_loader=None, creator=None) -> Account:
    """Create a non-contract account.

    :param balance: Initial balance for the account
    :param address: The account's address; generated when falsy
    :param concrete_storage: Interpret account storage as concrete
    :param dynamic_loader: used for dynamically loading storage from the block chain
    :param creator: passed to the address generator when no address is given
    :return: The new account
    """
    if not address:
        address = self._generate_new_address(creator)
    account = Account(address, balance=balance, dynamic_loader=dynamic_loader, concrete_storage=concrete_storage)
    self._put_account(account)
    return account
|
def stream(self):
    """Returns a generator of lines instead of a list of lines."""
    st = self._stream()
    # NOTE(review): only the FIRST item yielded by self._stream() is
    # iterated here — presumably _stream() yields one iterable of lines;
    # confirm against _stream()'s contract.
    for l in next(st):
        # Strip the trailing newline from each line before yielding
        yield l.rstrip("\n")
|
def main():
    """Main entry function."""
    if len(sys.argv) < 3:
        print('Usage: <project-name> <filetype> <list-of-path to traverse>')
        print('\tfiletype can be python/cpp/all')
        exit(-1)
    _HELPER.project_name = sys.argv[1]
    file_type = sys.argv[2]
    # Collect the file suffixes to lint based on the requested file type
    suffixes = []
    if file_type in ('python', 'all'):
        suffixes.extend(PYTHON_SUFFIX)
    if file_type in ('cpp', 'all'):
        suffixes.extend(CXX_SUFFIX)
    allow_type = set(suffixes)
    if os.name != 'nt':
        # Replace stderr so non-UTF8-encodable characters are substituted
        sys.stderr = codecs.StreamReaderWriter(sys.stderr, codecs.getreader('utf8'), codecs.getwriter('utf8'), 'replace')
    for path in sys.argv[3:]:
        if os.path.isfile(path):
            process(path, allow_type)
        else:
            for root, _dirs, files in os.walk(path):
                for fname in files:
                    process(os.path.join(root, fname), allow_type)
    nerr = _HELPER.print_summary(sys.stderr)
    sys.exit(nerr > 0)
|
def momentum(self, exponent=1, errorrequested=True):
    """Calculate momenta (integral of y times x^exponent).

    The integration is done by the trapezoid formula (np.trapz).

    :param exponent: the exponent of q in the integration.
    :param errorrequested: True if error should be returned (true Gaussian
        error-propagation of the trapezoid formula)
    """
    weighted = self.Intensity * self.q ** exponent
    integral = np.trapz(weighted, self.q)
    if not errorrequested:
        return integral
    weighted_err = self.Error * self.q ** exponent
    return ErrorValue(integral, errtrapz(self.q, weighted_err))
|
def get_inspect_for_image(image, registry, insecure=False, dockercfg_path=None):
    """Return inspect for image.

    :param image: ImageName, the remote image to inspect
    :param registry: str, URI for registry, if URI schema is not provided,
                     https:// will be used
    :param insecure: bool, when True registry's cert is not verified
    :param dockercfg_path: str, dirname of .dockercfg location
    :return: dict of inspected image
    :raises RuntimeError: when no usable manifest is found or the config
                          blob cannot be retrieved
    """
    all_man_digests = get_all_manifests(image, registry, insecure=insecure, dockercfg_path=dockercfg_path)
    blob_config = None
    config_digest = None
    image_inspect = {}
    # we have manifest list (get digest for 1st platform)
    if 'v2_list' in all_man_digests:
        man_list_json = all_man_digests['v2_list'].json()
        # only v2 schema 2 entries in a manifest list can be inspected
        if man_list_json['manifests'][0]['mediaType'] != MEDIA_TYPE_DOCKER_V2_SCHEMA2:
            raise RuntimeError('Image {image_name}: v2 schema 1 '
                               'in manifest list'.format(image_name=image))
        v2_digest = man_list_json['manifests'][0]['digest']
        blob_config, config_digest = get_config_and_id_from_registry(image, registry, v2_digest, insecure=insecure, version='v2', dockercfg_path=dockercfg_path)
    # get config for v2 digest
    elif 'v2' in all_man_digests:
        blob_config, config_digest = get_config_and_id_from_registry(image, registry, image.tag, insecure=insecure, version='v2', dockercfg_path=dockercfg_path)
    # read config from v1
    elif 'v1' in all_man_digests:
        v1_json = all_man_digests['v1'].json()
        if PY2:
            blob_config = json.loads(v1_json['history'][0]['v1Compatibility'].decode('utf-8'))
        else:
            blob_config = json.loads(v1_json['history'][0]['v1Compatibility'])
    else:
        raise RuntimeError("Image {image_name} not found: No v2 schema 1 image, or v2 schema 2 "
                           "image or list, found".format(image_name=image))
    # dictionary to convert config keys to inspect keys
    config_2_inspect = {'created': 'Created', 'os': 'Os', 'container_config': 'ContainerConfig', 'architecture': 'Architecture', 'docker_version': 'DockerVersion', 'config': 'Config', }
    if not blob_config:
        raise RuntimeError("Image {image_name}: Couldn't get inspect data "
                           "from digest config".format(image_name=image))
    # set Id, which isn't in the config blob; stays None for v1, as for that
    # the image would have to be pulled
    image_inspect['Id'] = config_digest
    # only v2 has rootfs, not v1
    if 'rootfs' in blob_config:
        image_inspect['RootFS'] = blob_config['rootfs']
    for old_key, new_key in config_2_inspect.items():
        image_inspect[new_key] = blob_config[old_key]
    return image_inspect
|
def index(self, key):
    """Return the index of the given item.

    :param key: an int index (validated) or a str key (looked up)
    :return: the integer index of the item
    :raises IndexError: when an int key is out of range
    :raises KeyError: when a str key is not present
    :raises TypeError: for any other key type
    """
    if isinstance(key, int):
        if not 0 <= key < len(self.__keys):
            raise IndexError(key)
        return key
    if isinstance(key, str):
        try:
            return self.__keys.index(key)
        except ValueError:
            raise KeyError(key)
    raise TypeError(key)
|
def search(cls, query, search_opts=None):
    """Search pools.

    Maps to the function :py:func:`nipap.backend.Nipap.search_pool` in
    the backend. Please see the documentation for the backend function
    for information regarding input arguments and return values.
    """
    opts = {} if search_opts is None else search_opts
    xmlrpc = XMLRPCConnection()
    payload = {
        'query': query,
        'search_options': opts,
        'auth': AuthOptions().options,
    }
    try:
        search_result = xmlrpc.connection.search_pool(payload)
    except xmlrpclib.Fault as xml_fault:
        # Translate XML-RPC faults into local exception types
        raise _fault_to_exception(xml_fault)
    return {
        'search_options': search_result['search_options'],
        'result': [Pool.from_dict(entry) for entry in search_result['result']],
    }
|
def writeFasta(sequence, sequence_name, output_file):
    """Write a fasta sequence into a file, wrapped at 60 characters per line.

    :param sequence: a string with the sequence to be written
    :param sequence_name: name of the fasta sequence (used as header line)
    :param output_file: /path/to/file.fa to be written
    :returns: nothing
    """
    # 'with' guarantees the handle is closed even if a write fails
    with open(output_file, 'w') as handle:
        handle.write(">" + str(sequence_name) + "\n")
        # range() with step 60 replaces the manual while loop; iterating
        # strictly below len(sequence) fixes the original '<=' bound, which
        # emitted a spurious empty line when the length was a multiple of 60.
        for start in range(0, len(sequence), 60):
            handle.write(sequence[start:start + 60] + "\n")
|
def check_api_version(resource_root, min_version):
    """Check that *resource_root*'s API version is at least *min_version*.

    :raises Exception: when the version in use is below the minimum.
    """
    actual = resource_root.version
    if actual < min_version:
        raise Exception("API version %s is required but %s is in use." % (min_version, actual))
|
def compute_Pi_J(self, CDR3_seq, J_usage_mask):
    """Compute Pi_J.

    This function returns the Pi array from the model factors of the J genomic
    contributions, P(delJ|J). This corresponds to J(D)^{x_4}.

    Parameters
    ----------
    CDR3_seq : str
        CDR3 sequence composed of 'amino acids' (single character symbols
        each corresponding to a collection of codons as given by codons_dict).
    J_usage_mask : list
        Indices of the J alleles to be considered in the Pgen computation.

    Instance attributes used:
    self.cutJ_genomic_CDR3_segs : list of strings
        All J genomic nucleotide sequences trimmed to begin at the conserved
        3' residue (F/W), with maximum palindromic insertions appended.
    self.PJdelJ_nt_pos_vec : list of ndarrays
        Per J allele, P(delJ|J) formatted for the first/last position in
        each codon.
    self.PJdelJ_2nd_nt_pos_per_aa_vec : list of dicts
        Per J allele and per 'amino acid', P(delJ|J) formatted for the middle
        position of a codon.

    Returns
    -------
    Pi_J : list of ndarrays
        (4, 3L) arrays corresponding to J^{x_4}.
    r_J_usage_mask : list
        Reduced J usage mask: J genes/alleles with no contribution (bad
        alignment) are removed, to speed up the V-side computation
        (which is conditioned on the J).
    """
    # NOTE: the cutJ_genomic_CDR3_segs INCLUDE the palindromic insertions and
    # thus are max_palindrome nts longer than the template; the genomic
    # sequence should be pruned to start at a conserved region on the J side.
    # NOTE(review): Python 2 code (print statement, integer '/' division).
    Pi_J = []
    # Holds the aggregate weight for each nt possibility and position
    r_J_usage_mask = []
    for j, J_in in enumerate(J_usage_mask):
        try:
            cutJ_gen_seg = self.cutJ_genomic_CDR3_segs[J_in]
        except IndexError:
            # Skip mask entries outside the known allele range
            print 'Check provided J usage mask. Contains indicies out of allowed range.'
            continue
        current_alignment_length = self.max_nt_to_aa_alignment_right(CDR3_seq, cutJ_gen_seg)
        # alignment_lengths += [current_alignment_length]
        current_Pi_J = np.zeros((4, len(CDR3_seq) * 3))
        if current_alignment_length > 0:
            # For first and last nt in a codon use PJdelJ_nt_pos_vec
            current_Pi_J[:, -current_alignment_length:] = self.PJdelJ_nt_pos_vec[J_in][:, -current_alignment_length:]
            # for middle nt use PJdelJ_2nd_nt_pos_per_aa_vec
            # (pos / 3 relies on Python 2 floor division of negative ints)
            for pos in range(-2, -current_alignment_length - 1, -3):
                current_Pi_J[:, pos] = self.PJdelJ_2nd_nt_pos_per_aa_vec[J_in][CDR3_seq[pos / 3]][:, pos]
        # Keep only alleles that actually contribute weight
        if np.sum(current_Pi_J) > 0:
            Pi_J.append(current_Pi_J)
            r_J_usage_mask.append(J_in)
    return Pi_J, r_J_usage_mask
|
async def list_state(self, request):
    """Fetches list of data entries, optionally filtered by address prefix.

    Request:
        query:
            - head: The id of the block to use as the head of the chain
            - address: Return entries whose addresses begin with this prefix

    Response:
        data: An array of leaf objects with address and data keys
        head: The head used for this query (most recent if unspecified)
        link: The link to this exact query, including head block
        paging: Paging info and nav, like total resources and a next link
    """
    paging_controls = self._get_paging_controls(request)
    # Resolve the requested head block id (None -> most recent) to a state root
    head, root = await self._head_to_root(request.url.query.get('head', None))
    validator_query = client_state_pb2.ClientStateListRequest(
        state_root=root,
        address=request.url.query.get('address', None),
        sorting=self._get_sorting_message(request, "default"),
        paging=self._make_paging_message(paging_controls))
    # Forward the protobuf query to the validator and parse its response
    response = await self._query_validator(
        Message.CLIENT_STATE_LIST_REQUEST,
        client_state_pb2.ClientStateListResponse,
        validator_query)
    return self._wrap_paginated_response(
        request=request,
        response=response,
        controls=paging_controls,
        data=response.get('entries', []),
        head=head)
|
def QA_indicator_OBV(DataFrame):
    """能量潮 (On-Balance Volume indicator)."""
    vol = DataFrame.volume
    close = DataFrame.close
    # Signed volume: +vol on up-closes, -vol on down-closes, 0 otherwise
    signed_vol = IF(close > REF(close, 1), vol, IF(close < REF(close, 1), -vol, 0))
    return pd.DataFrame({'OBV': np.cumsum(signed_vol) / 10000})
|
def get_string_length(self):
    """Return the string size for this entity.

    First tries ``get_array_size``; if that raises, falls back to parsing
    the digits that follow a '.' in the entity's address.

    :return: the string size as an int
    :raises Exception: when no size can be parsed from the address
    """
    try:
        return self.get_array_size()
    # Narrowed from a bare 'except:' which also swallowed
    # SystemExit/KeyboardInterrupt.
    except Exception:
        match = re.search(r"(?<=\.)\d+", self.get_address())
        try:
            # match may be None; the AttributeError is caught below
            return int(match.group(0))
        except Exception as ex:
            raise Exception('Could not get string size of {0} address {1}'.format(self.name, self.get_address()), ex)
|
def plot(self, plot_intermediate_solutions=True, plot_observed_data=True, plot_starting_trajectory=True, plot_optimal_trajectory=True, filter_plots_function=None, legend=True, kwargs_observed_data=None, kwargs_starting_trajectories=None, kwargs_optimal_trajectories=None, kwargs_intermediate_trajectories=None):
    """Plot the inference result.

    Trajectories are grouped by their ``description``; when more than one
    group exists, each group is drawn on its own figure.

    :param plot_intermediate_solutions: plot the trajectories resulting from
        the intermediate solutions as well
    :param plot_observed_data: plot the observed data points
    :param plot_starting_trajectory: plot the starting trajectories
    :param plot_optimal_trajectory: plot the optimal trajectories
    :param filter_plots_function: A function that takes a trajectory object
        and returns True if it should be plotted and False if not.
        None plots all available trajectories
    :param legend: Whether to draw the legend or not
    :param kwargs_observed_data: kwargs to be passed to the ``trajectory.plot``
        function for the observed data
    :param kwargs_starting_trajectories: kwargs to be passed to the
        ``trajectory.plot`` function for the starting trajectories
    :param kwargs_optimal_trajectories: kwargs to be passed to the
        ``trajectory.plot`` function for the optimal trajectories
    :param kwargs_intermediate_trajectories: kwargs to be passed to the
        ``trajectory.plot`` function for the intermediate trajectories
    """
    # NOTE(review): uses dict.iteritems — this function is Python 2 code.
    from matplotlib import pyplot as plt
    if filter_plots_function is None:
        # Default filter accepts every trajectory
        filter_plots_function = lambda x: True
    observed_trajectories = self.observed_trajectories
    starting_trajectories = self.starting_trajectories
    optimal_trajectories = self.optimal_trajectories
    if plot_intermediate_solutions:
        intermediate_trajectories_list = self.intermediate_trajectories
    else:
        intermediate_trajectories_list = []

    def initialise_default_kwargs(kwargs, default_data):
        # Fill in defaults without overriding caller-supplied values
        if kwargs is None:
            kwargs = {}
        for key, value in default_data.iteritems():
            if key not in kwargs:
                kwargs[key] = value
        return kwargs
    # Maps trajectory.description -> list of (trajectory, plot_kwargs)
    trajectories_by_description = {}
    kwargs_observed_data = initialise_default_kwargs(kwargs_observed_data, {'label': "Observed data", 'marker': '+', 'color': 'black', 'linestyle': 'None'})
    kwargs_optimal_trajectories = initialise_default_kwargs(kwargs_optimal_trajectories, {'label': "Optimised Trajectory", 'color': 'blue'})
    kwargs_starting_trajectories = initialise_default_kwargs(kwargs_starting_trajectories, {'label': "Starting trajectory", 'color': 'green'})
    kwargs_intermediate_trajectories = initialise_default_kwargs(kwargs_intermediate_trajectories, {'label': 'Intermediate Trajectories', 'alpha': 0.1, 'color': 'cyan'})
    if plot_observed_data:
        for trajectory in observed_trajectories:
            if not filter_plots_function(trajectory):
                continue
            try:
                list_ = trajectories_by_description[trajectory.description]
            except KeyError:
                list_ = []
                trajectories_by_description[trajectory.description] = list_
            list_.append((trajectory, kwargs_observed_data))
    if plot_starting_trajectory:
        for trajectory in starting_trajectories:
            if not filter_plots_function(trajectory):
                continue
            try:
                list_ = trajectories_by_description[trajectory.description]
            except KeyError:
                list_ = []
                trajectories_by_description[trajectory.description] = list_
            list_.append((trajectory, kwargs_starting_trajectories))
    seen_intermediate_trajectories = set()
    for i, intermediate_trajectories in enumerate(intermediate_trajectories_list):
        for trajectory in intermediate_trajectories:
            if not filter_plots_function(trajectory):
                continue
            seen = trajectory.description in seen_intermediate_trajectories
            kwargs = kwargs_intermediate_trajectories.copy()
            # Only set label once per description so the legend has a
            # single entry for the (many) intermediate trajectories
            if not seen:
                seen_intermediate_trajectories.add(trajectory.description)
            else:
                kwargs['label'] = ''
            try:
                list_ = trajectories_by_description[trajectory.description]
            except KeyError:
                list_ = []
                trajectories_by_description[trajectory.description] = list_
            list_.append((trajectory, kwargs))
    if plot_optimal_trajectory:
        for trajectory in optimal_trajectories:
            if not filter_plots_function(trajectory):
                continue
            try:
                list_ = trajectories_by_description[trajectory.description]
            except KeyError:
                list_ = []
                trajectories_by_description[trajectory.description] = list_
            list_.append((trajectory, kwargs_optimal_trajectories))
    for description, trajectories_list in trajectories_by_description.iteritems():
        # A fresh figure per description group (only when several exist)
        if len(trajectories_by_description) > 1:
            plt.figure()
            plt.title(description)
        for trajectory, kwargs in trajectories_list:
            trajectory.plot(**kwargs)
        if legend:
            plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
|
def read(self):
    """Read the stdout and stderr pipes once the process has finished.

    :return: (out, err) tuple of decoded strings; both empty while the
        process is still running or absent.
    """
    process = self._process
    if process and process.poll() is not None:
        ip = get_ipython()
        err = ip.user_ns['error'].read().decode()
        out = ip.user_ns['output'].read().decode()
        return out, err
    return '', ''
|
def base_image_inspect(self):
    """inspect base image

    The result is computed once and cached on ``self._base_image_inspect``;
    it is also recorded in ``self._parent_images_inspect`` under the base
    image's string form.

    :return: dict
    :raises KeyError: when the base image cannot be found locally
    """
    if self._base_image_inspect is None:
        if self.base_from_scratch:
            # 'FROM scratch' builds have no base image to inspect
            self._base_image_inspect = {}
        elif self.parents_pulled or self.custom_base_image:
            try:
                self._base_image_inspect = self.tasker.inspect_image(self.base_image)
            except docker.errors.NotFound:
                # If the base image cannot be found throw KeyError -
                # as this property should behave like a dict
                raise KeyError("Unprocessed base image Dockerfile cannot be inspected")
        else:
            # Image was not pulled locally: inspect it through the registry
            self._base_image_inspect = atomic_reactor.util.get_inspect_for_image(self.base_image, self.base_image.registry, self.base_image_insecure, self.base_image_dockercfg_path)
        base_image_str = str(self.base_image)
        # Mirror the result into the per-parent-image inspect cache
        if base_image_str not in self._parent_images_inspect:
            self._parent_images_inspect[base_image_str] = self._base_image_inspect
    return self._base_image_inspect
|
def uploadFileToIM(self, directory, filename, title):
    """Upload a package file to IM.

    Parameters as they look in the form for uploading packages to IM.

    :param directory: directory holding the file; concatenated with
        *filename* as-is, so it must end with a path separator
    :param filename: name of the file to upload
    :param title: form title value
        (NOTE(review): not sent in the request — confirm it should be)
    """
    self.logger.debug("uploadFileToIM(" + "{},{},{})".format(directory, filename, title))
    parameters = {'data-filename-placement': 'inside', 'title': str(filename), 'filename': str(filename), 'type': 'file', 'name': 'files', 'id': 'fileToUpload', 'multiple': ''}
    file_dict = {'files': (str(filename), open(directory + filename, 'rb'), 'application/x-rpm')}
    # NOTE(review): 'm' is built but never used — the headers argument that
    # would consume m.content_type is commented out below.
    m = MultipartEncoder(fields=file_dict)
    temp_username = self._username
    temp_password = self._password
    temp_im_api_url = self._im_api_url
    temp_im_session = requests.Session()
    # Force a TLS1-capable transport for the IM endpoint
    temp_im_session.mount('https://', TLS1Adapter())
    # NOTE(review): temp_im_verify_ssl is read but never passed on; the POST
    # below hard-codes verify=False — confirm this is intentional.
    temp_im_verify_ssl = self._im_verify_ssl
    resp = temp_im_session.post("{}/{}".format(temp_im_api_url, "types/InstallationPackage/instances/uploadPackage"), auth=HTTPBasicAuth(temp_username, temp_password), # headers = m . content _ type ,
        files=file_dict, verify=False, data=parameters)
    self.logger.info("Uploaded: " + "{}".format(filename))
    self.logger.debug("HTTP Response: " + "{}".format(resp.status_code))
|
def add_volume(self, volume):
    """Append *volume* to ``self.volumes`` unless an equal one is present."""
    if not any(volume == existing for existing in self.volumes):
        self.volumes.append(volume)
|
def _regex_strings(self):
    """A property to link into IntentEngine's _regex_strings.

    Warning: this is only for backwards compatibility and should not be
    used if you intend on using domains.

    Returns: the domain's _regex_strings from its IntentEngine
    """
    default_domain = 0
    # Lazily create the default domain on first access
    if default_domain not in self.domains:
        self.register_domain(domain=default_domain)
    return self.domains[default_domain]._regex_strings
|
def find_zero_constrained_reactions(model):
    """Return list of reactions that are constrained to zero flux."""
    zero_constrained = []
    for reaction in model.reactions:
        if reaction.lower_bound == 0 and reaction.upper_bound == 0:
            zero_constrained.append(reaction)
    return zero_constrained
|
def measure_board_rms(control_board, n_samples=10, sampling_ms=10, delay_between_samples_ms=0):
    '''Read RMS voltage samples from control board high-voltage feedback circuit.

    Returns a DataFrame with 'board measured V' and 'divider resistor index'
    columns; the frame is empty when the measurement fails.
    '''
    try:
        results = control_board.measure_impedance(n_samples, sampling_ms, delay_between_samples_ms, True, True, [])
    except RuntimeError:
        # `RuntimeError` may be raised if, for example, the current limit was
        # reached during measurement. In such cases, return an empty frame.
        logger.warning('Error encountered during high-voltage RMS measurement.', exc_info=True)
        return pd.DataFrame(None, columns=['board measured V', 'divider resistor index'])
    frame = pd.DataFrame({'board measured V': results.V_hv})
    frame['divider resistor index'] = results.hv_resistor
    return frame
|
def set_attributes_from_headers(self, headers):
    """Set pagination/user attributes from HTTP response headers.

    :param headers: HTTP header mapping
    """
    self.total_count = headers.get('x-total-count', None)
    self.current_page = headers.get('x-current-page', None)
    self.per_page = headers.get('x-per-page', None)
    self.user_type = headers.get('x-user-type', None)
    # Numeric headers arrive as strings; convert the ones that are present
    for attr in ('total_count', 'current_page', 'per_page'):
        raw = getattr(self, attr)
        if raw:
            setattr(self, attr, int(raw))
|
def advertise(self, routers=None, name=None, timeout=None, router_file=None, jitter=None):
    """Make a service available on the Hyperbahn routing mesh.

    This will make contact with a Hyperbahn host from a list of known
    Hyperbahn routers. Additional Hyperbahn connections will be established
    once contact has been made with the network.

    :param routers:
        A seed list of addresses of Hyperbahn routers, e.g.,
        ``["127.0.0.1:23000"]``.
    :param name:
        The identity of this service on the Hyperbahn. This is usually
        unnecessary, as it defaults to the name given when initializing
        the :py:class:`TChannel` (which is used as your identity as a
        caller).
    :returns:
        A future that resolves to the remote server's response after the
        first advertise finishes. Advertisement will continue to happen
        periodically.
    """
    service_name = name or self.name
    # The channel must be listening before it can be advertised
    if not self.is_listening():
        self.listen()
    return hyperbahn.advertise(self, service_name, routers, timeout, router_file, jitter)
|
def exists(self):
    """:return: True if the submodule exists, False otherwise. Please note that
    a submodule may exist (in the .gitmodules file) even though its module
    doesn't exist on disk"""
    # keep attributes for later, and restore them if we have no valid data
    # this way we do not actually alter the state of the object
    # NOTE(review): locals() is used here as a scratch dict — mutating it is
    # CPython-specific behavior.
    loc = locals()
    for attr in self._cache_attrs:
        try:
            if hasattr(self, attr):
                loc[attr] = getattr(self, attr)
            # END if we have the attribute cache
        except (cp.NoSectionError, ValueError):
            # on PY3, this can happen apparently... don't know why this
            # doesn't happen on PY2
            pass
    # END for each attr
    self._clear_cache()
    try:
        try:
            # Accessing self.path after clearing the cache raises when the
            # submodule is not recorded; that is the existence check
            self.path
            return True
        except Exception:
            return False
        # END handle exceptions
    finally:
        # Restore the attribute values captured above
        for attr in self._cache_attrs:
            if attr in loc:
                setattr(self, attr, loc[attr])
|
def write_yaml(self, data, encoding=None, errors=None, newline=None, **kwargs):
    """Write *data* to this path as a YAML document.

    The *encoding*, *errors*, and *newline* keywords are passed to
    :meth:`open`. The remaining *kwargs* are passed to :meth:`yaml.dump`.
    """
    import yaml
    handle = self.open(mode='wt', encoding=encoding, errors=errors, newline=newline)
    with handle as stream:
        return yaml.dump(data, stream=stream, **kwargs)
|
def from_binary_string(cls, stream):
    """Read feedback information from the stream and unpack it.

    :param stream: A stream of feedback data from APN. Can contain multiple
        feedback tuples, as defined in the feedback service protocol.
    :return: A list containing all unpacked feedbacks.
    """
    feedbacks = []
    cursor = 0
    total = len(stream)
    while cursor < total:
        # Each record starts with a 6-byte prefix: timestamp + token length
        timestamp, token_length = struct.unpack(cls.FORMAT_PREFIX, stream[cursor:cursor + 6])
        when = datetime.fromtimestamp(timestamp)
        cursor += 6
        raw_token = struct.unpack('>{0}s'.format(token_length), stream[cursor:cursor + token_length])[0]
        cursor += token_length
        feedbacks.append(cls(when, binascii.hexlify(raw_token)))
    return feedbacks
|
def iat(x, maxlag=None):
    """Calculate the integrated autocorrelation time (IAT), given the trace
    from a Stochastic."""
    if not maxlag:
        # Determine the maximum lag to which autocorrelation is calculated
        maxlag = _find_max_lag(x)
    acr = [autocorr(x, lag) for lag in range(1, maxlag + 1)]
    # Pairwise sums of successive autocorrelations (gamma values)
    gammas = [acr[2 * i] + acr[2 * i + 1] for i in range(maxlag // 2)]
    cut = _cut_time(gammas)
    if cut + 1 == len(gammas):
        print_("Not enough lag to calculate IAT")
    return np.sum(2 * gammas[:cut + 1]) - 1.0
|
def explode(self):
    """If the current Line entity consists of multiple lines, break it up
    into n Line entities.

    Returns
    -------
    exploded : (n,) Line entities
    """
    pts = self.points
    # Duplicate every point, drop the first and last copies, then pair
    # consecutive points: [a, b, c] -> [[a, b], [b, c]]
    pairs = np.column_stack((pts, pts)).ravel()[1:-1].reshape((-1, 2))
    return [Line(pair) for pair in pairs]
|
def delayed_close(self):
    """Delayed close - won't close immediately, but on the next reactor
    loop."""
    # Mark the session as closing right away so other code can observe it,
    # then schedule the actual close for the next reactor iteration.
    self.state = SESSION_STATE.CLOSING
    reactor.callLater(0, self.close)
|
def documentation(default=None, api_version=None, api=None, **kwargs):
    """Return documentation for the current api.

    Returns None when no api is supplied. *default*, when given, takes
    precedence over *api_version*.
    """
    version = default or api_version
    if not api:
        return None
    return api.http.documentation(base_url="", api_version=version)
|
def clear_data(self):
    """Clear menu data from previous menu generation."""
    header = self.__header
    header.title = None
    header.subtitle = None
    self.__prologue.text = None
    self.__epilogue.text = None
    self.__items_section.items = None
|
def clientConnectionFailed(self, err, address: Address):
    """Called when we fail to connect to an endpoint.

    Args:
        err: Twisted Failure instance
        address: the address we failed to connect to

    Returns:
        the failure's exception type (returned for testing only)
    """
    # Log a failure message tailored to the specific connection error
    if type(err.value) == error.TimeoutError:
        logger.debug(f"Failed connecting to {address} connection timed out")
    elif type(err.value) == error.ConnectError:
        ce = err.value
        if len(ce.args) > 0:
            logger.debug(f"Failed connecting to {address} {ce.args[0].value}")
        else:
            logger.debug(f"Failed connecting to {address}")
    else:
        logger.debug(f"Failed connecting to {address} {err.value}")
    # Bookkeeping: drop the address from all tracking structures
    self.peers_connecting -= 1
    self.RemoveKnownAddress(address)
    self.RemoveFromQueue(address)
    # if we failed to connect to new addresses, we should always add them to the DEAD_ADDRS list
    self.AddDeadAddress(address)
    # for testing
    return err.type
|
def perform_request(self, request):
    '''Sends an HTTPRequest to Azure Storage and returns an HTTPResponse. If
    the response code indicates an error, raise an HTTPError.

    :param HTTPRequest request:
        The request to serialize and send.
    :return: An HTTPResponse containing the parsed HTTP response.
    :rtype: :class:`~azure.storage.common._http.HTTPResponse`
    '''
    # Verify the body is in bytes or either a file-like/stream object
    if request.body:
        request.body = _get_data_bytes_or_stream_only('request.body', request.body)
    # Construct the URI
    uri = self.protocol.lower() + '://' + request.host + request.path
    # Send the request
    response = self.session.request(request.method, uri, params=request.query, headers=request.headers, data=request.body or None, timeout=self.timeout, proxies=self.proxies)
    # Parse the response
    status = int(response.status_code)
    response_headers = {}
    for key, name in response.headers.items():
        # Preserve the case of metadata headers; lower-case all others
        if key.lower().startswith('x-ms-meta-'):
            response_headers[key] = name
        else:
            response_headers[key.lower()] = name
    wrap = HTTPResponse(status, response.reason, response_headers, response.content)
    # Release the underlying connection before handing back the wrapper
    response.close()
    return wrap
|
def ansi_split(text, _re=re.compile(u"(\x1b\\[(\\d*;?)*\\S)")):
    """Yield (is_ansi, text) pairs for each fragment of *text*.

    Fragments matching the ANSI escape pattern are flagged True.
    """
    for fragment in _re.split(text):
        if not fragment:
            continue
        yield bool(_re.match(fragment)), fragment
|
def frameify(self, state, data):
    """Split data into a sequence of newline-terminated lines.

    Generator yielding each complete line without its trailing newline
    (and without a trailing carriage return when ``self.carriage_return``
    is set). Any trailing partial line is stored back into
    ``state.recv_buf`` so the next call can resume where this one left off.

    :param state: connection state object carrying ``recv_buf``
    :param data: newly received text to frame
    """
    # Pull in any partially-processed data
    data = state.recv_buf + data
    # Loop over the data
    while data:
        line, sep, rest = data.partition('\n')
        # Did we have a whole line?
        if sep != '\n':
            break
        # OK, update the data...
        data = rest
        # Now, strip off carriage return, if there is one.
        # endswith() also handles empty lines, which previously raised
        # IndexError via line[-1] when carriage_return was enabled.
        if self.carriage_return and line.endswith('\r'):
            line = line[:-1]
        # Yield the line
        try:
            yield line
        except FrameSwitch:
            # Consumer threw FrameSwitch in: stop framing, keep the
            # unconsumed remainder buffered
            break
    # Put any remaining data back into the buffer
    state.recv_buf = data
|
def refine_pi_cation_laro(self, all_picat, stacks):
    """Just important for constellations with histidine involved. If the
    histidine ring is positioned in stacking position to an aromatic ring in
    the ligand, there is in most cases stacking and pi-cation interaction
    reported, as histidine also carries a positive charge in the ring. For
    such cases, only report stacking."""
    filtered = []
    for picat in all_picat:
        # Drop the pi-cation hit when the same ring pair is already reported
        # as a HIS stacking interaction
        his_stacked = any(
            whichrestype(stack.proteinring.atoms[0]) == 'HIS'
            and picat.ring.obj == stack.ligandring.obj
            for stack in stacks)
        if not his_stacked:
            filtered.append(picat)
    return filtered
|
def pitch_shift(y, sr, n_steps, rbargs=None):
    '''Apply a pitch shift to an audio time series.

    Parameters
    ----------
    y : np.ndarray [shape=(n,) or (n, c)]
        Audio time series, either single or multichannel
    sr : int > 0
        Sampling rate of `y`
    n_steps : float
        Shift by `n_steps` semitones.
    rbargs
        Additional keyword parameters for rubberband
        See `rubberband -h` for details.

    Returns
    -------
    y_shift : np.ndarray
        Pitch-shifted audio
    '''
    # A zero shift is the identity: return the input untouched
    if n_steps == 0:
        return y
    args = dict() if rbargs is None else rbargs
    args.setdefault('--pitch', n_steps)
    return __rubberband(y, sr, **args)
|
def _genEmptyResults(self):
    """Build an empty result mapping to start counting from.

    :return: OrderedDict mapping each allowed key to 0, in the strict
        order of ``self._allowedKeys``.
    """
    # A generator fed into OrderedDict preserves the strict key order
    # (a dict comprehension was avoided historically for ordering reasons)
    return OrderedDict((key, 0) for key in self._allowedKeys)
|
def get_xdsl_stats(self):
    """Fetch all statistics about the xDSL connection.

    :return: all stats reported by the ``wan/xdsl/stats`` endpoint (see API doc)
    :rtype: dict
    """
    self.bbox_auth.set_access(BboxConstant.AUTHENTICATION_LEVEL_PUBLIC,
                              BboxConstant.AUTHENTICATION_LEVEL_PRIVATE)
    self.bbox_url.set_api_name(BboxConstant.API_WAN, "xdsl/stats")
    call = BboxApiCall(self.bbox_url, BboxConstant.HTTP_METHOD_GET,
                       None, self.bbox_auth)
    response = call.execute_api_request()
    # The payload is a one-element list wrapping the WAN data.
    return response.json()[0]["wan"]["xdsl"]["stats"]
|
def to_path_value(self, obj):
    """Convert a value into a string suitable for inclusion in a URL path.

    Lists are rendered as comma-separated values; any other value is
    stringified with ``str``.

    :param obj: object, string or list value.
    :return string: path-ready value.
    """
    # isinstance (rather than type(...) == list) also accepts list subclasses.
    if isinstance(obj, list):
        # Coerce each element so lists of non-strings (e.g. ints) also work.
        return ','.join(str(element) for element in obj)
    return str(obj)
|
def start(self, request, application, extra_roles=None):
    """Kick off the state machine at its first state."""
    # Resolve the roles the current user holds for this application.
    user_roles = self._get_roles_for_request(request, application)
    if extra_roles is not None:
        user_roles.update(extra_roles)

    # Guard clause: a user who is not applicant, leader, delegate or admin
    # carries no 'is_authorised' role and is rejected outright.
    if 'is_authorised' not in user_roles:
        return HttpResponseForbidden('<h1>Access Denied</h1>')

    return self._next(request, application, user_roles, self._first_state)
|
def filenames(self):
    """List of file names the data is originally being read from.

    Returns
    -------
    names : list of str
        List of file names at the beginning of the input chain.
    """
    if not self._is_reader:
        # Not a reader itself: delegate to the start of the input chain.
        return self.data_producer.filenames
    assert self._filenames is not None
    return self._filenames
|
def get_mac_acl_for_intf_input_direction(self, **kwargs):
    """Build and dispatch a ``get_mac_acl_for_intf`` RPC payload.

    Expects a ``direction`` keyword argument; an optional ``callback``
    keyword overrides the instance default used to submit the XML.
    """
    rpc = ET.Element("get_mac_acl_for_intf")
    rpc_input = ET.SubElement(rpc, "input")
    direction_node = ET.SubElement(rpc_input, "direction")
    direction_node.text = kwargs.pop('direction')
    callback = kwargs.pop('callback', self._callback)
    return callback(rpc)
|
def union(self, *others: 'Substitution') -> 'Substitution':
    """Merge this substitution with ``others`` into a new substitution.

    Whenever a variable occurs in more than one substitution, the
    replacements are merged via :meth:`union_with_variable`.  None of
    the original substitutions is modified.

    Example:

        >>> subst1 = Substitution({'x': Multiset(['a', 'b']), 'z': a})
        >>> subst2 = Substitution({'x': ('a', 'b'), 'y': ('c', )})
        >>> print(subst1.union(subst2))
        {x ↦ (a, b), y ↦ (c), z ↦ a}

    Args:
        others:
            The other substitutions to merge with this one.

    Returns:
        The new substitution with the other substitutions merged.

    Raises:
        ValueError:
            If a variable occurs in multiple substitutions but the
            replacements conflict and cannot be merged.
    """
    merged = Substitution(self)
    for substitution in others:
        for variable_name, replacement in substitution.items():
            merged.try_add_variable(variable_name, replacement)
    return merged
|
def find_commands(management_dir):
    """Return the command names available under a management directory.

    Commands are ``.py`` files in the ``commands`` subdirectory whose names
    do not start with an underscore.  Returns an empty list if the
    directory does not exist or cannot be read.
    """
    commands_path = os.path.join(management_dir, 'commands')
    try:
        entries = os.listdir(commands_path)
    except OSError:
        return []
    return [name[:-3] for name in entries
            if not name.startswith('_') and name.endswith('.py')]
|
def isJournal(self, dbname=abrevDBname, manualDB=manualDBname, returnDict='both', checkIfExcluded=False):
    """Returns `True` if the `Citation`'s `journal` field is a journal abbreviation from the WOS listing found at [http://images.webofknowledge.com/WOK46/help/WOS/A_abrvjt.html](http://images.webofknowledge.com/WOK46/help/WOS/A_abrvjt.html), i.e. checks if the citation is citing a journal.

    **Note**: Requires the [j9Abbreviations](../modules/journalAbbreviations.html#metaknowledge.journalAbbreviations.backend.getj9dict) database file and will raise an error if it cannot be found.

    **Note**: All parameters are used for getting the data base with [getj9dict](../modules/journalAbbreviations.html#metaknowledge.journalAbbreviations.backend.getj9dict).

    # Parameters

    _dbname_ : `optional [str]`

    > The name of the downloaded database file, the default is determined at run time. It is recommended that this remain untouched.

    _manualDB_ : `optional [str]`

    > The name of the manually created database file, the default is determined at run time. It is recommended that this remain untouched.

    _returnDict_ : `optional [str]`

    > default `'both'`, can be used to get both databases or only one with `'WOS'` or `'manual'`.

    # Returns

    `bool`

    > `True` if the `Citation` is for a journal
    """
    # The abbreviation database is loaded lazily once and shared module-wide.
    global abbrevDict
    if abbrevDict is None:
        abbrevDict = getj9dict(dbname=dbname, manualDB=manualDB, returnDict=returnDict)
    if not hasattr(self, 'journal'):
        # Citation carries no journal field at all.
        return False
    elif checkIfExcluded and self.journal:
        try:
            # A truthy first entry means the journal is *not* excluded;
            # an unknown journal defaults to [True], i.e. not excluded.
            if abbrevDict.get(self.journal, [True])[0]:
                return False
            else:
                return True
        except IndexError:
            # Malformed (empty) database entry: treat as not excluded.
            return False
    else:
        if self.journal:
            # NOTE(review): despite the docstring, this returns the stored
            # (truthy) database value rather than a strict bool — confirm
            # no caller relies on `is True`.
            dictVal = abbrevDict.get(self.journal, [b''])[0]
            if dictVal:
                return dictVal
            else:
                return False
        else:
            # journal attribute exists but is empty/falsy.
            return False
|
def load_consumer_metadata_for_group(self, group):
    """Determine broker for the consumer metadata for the specified group

    Returns a deferred which callbacks with True if the group's coordinator
    could be determined, or errbacks with
    ConsumerCoordinatorNotAvailableError if not.

    Parameters
    ----------
    group:
        group name as `str`
    """
    group = _coerce_consumer_group(group)
    log.debug("%r: load_consumer_metadata_for_group(%r)", self, group)

    # If we are already loading the metadata for this group, then
    # just return the outstanding deferred
    if group in self.coordinator_fetches:
        d = defer.Deferred()
        # Piggy-back on the in-flight request's deferred list.
        self.coordinator_fetches[group][1].append(d)
        return d

    # No outstanding request, create a new one
    requestId = self._next_id()
    request = KafkaCodec.encode_consumermetadata_request(self._clientIdBytes, requestId, group)

    # Callbacks for the request deferred...
    def _handleConsumerMetadataResponse(response_bytes):
        # Decode the response (returns ConsumerMetadataResponse)
        response = KafkaCodec.decode_consumermetadata_response(response_bytes)
        log.debug("%r: load_consumer_metadata_for_group(%r) -> %r", self, group, response)
        if response.error:
            # Map the broker error code to its exception class
            # (UnknownError as a fallback) and raise it.
            raise BrokerResponseError.errnos.get(response.error, UnknownError)(response)
        bm = BrokerMetadata(response.node_id, response.host, response.port)
        # Record the coordinator and make sure the broker is tracked.
        self.consumer_group_to_brokers[group] = bm
        self._update_brokers([bm])
        return True

    def _handleConsumerMetadataErr(err):
        log.error("Failed to retrieve consumer metadata for group %r", group, exc_info=(err.type, err.value, err.getTracebackObject()))
        # Clear any stored value for the group's coordinator
        self.reset_consumer_group_metadata(group)
        # FIXME: This exception should chain from err.
        raise ConsumerCoordinatorNotAvailableError("Coordinator for group {!r} not available".format(group), )

    def _propagate(result):
        # Fan the final result (True or a Failure) out to every waiter.
        # NOTE(review): the pop(..., None) default would make the unpacking
        # below raise TypeError if the entry were already gone — confirm
        # that cannot happen.
        [_, ds] = self.coordinator_fetches.pop(group, None)
        for d in ds:
            d.callback(result)

    # Send the request, add the handlers
    request_d = self._send_broker_unaware_request(requestId, request)
    d = defer.Deferred()
    # Save the deferred under the fetches for this group
    self.coordinator_fetches[group] = (request_d, [d])

    request_d.addCallback(_handleConsumerMetadataResponse)
    request_d.addErrback(_handleConsumerMetadataErr)
    request_d.addBoth(_propagate)
    return d
|
def iter_sections(self, order=Tree.ipreorder, neurite_order=NeuriteIter.FileOrder):
    '''iteration over section nodes

    Parameters:
        order: section iteration order within a given neurite. Must be one of:
            Tree.ipreorder: Depth-first pre-order iteration of tree nodes
            Tree.ipostorder: Depth-first post-order iteration of tree nodes
            Tree.iupstream: Iterate from a tree node to the root nodes
            Tree.ibifurcation_point: Iterator to bifurcation points
            Tree.ileaf: Iterator to all leaves of a tree

        neurite_order: order upon which neurites should be iterated. Values:
            - NeuriteIter.FileOrder: order of appearance in the file
            - NeuriteIter.NRN: NRN simulator order: soma -> axon -> basal -> apical
    '''
    # Delegate to the module-level iter_sections helper.
    return iter_sections(self, iterator_type=order, neurite_order=neurite_order)
|
def vertical_headers(self, value):
    """Setter for **self.__vertical_headers** attribute.

    The attribute is assigned unconditionally; validation applies only
    to non-None values.

    :param value: Attribute value.
    :type value: OrderedDict
    """
    if value is not None:
        # NOTE(review): assert-based validation disappears under ``python -O``.
        assert type(value) is OrderedDict, "'{0}' attribute: '{1}' type is not 'OrderedDict'!".format("vertical_headers", value)
    self.__vertical_headers = value
|
def delete_user(self, username):
    """Remove the named user from the server.

    :param string username: Name of the user to delete from the server.
    """
    endpoint = Client.urls['users_by_name'] % username
    return self._call(endpoint, 'DELETE')
|
def HasDateExceptionOn(self, date, exception_type=_EXCEPTION_TYPE_ADD):
    """Test if this service period has a date exception of the given type.

    Args:
      date: a string of form "YYYYMMDD"
      exception_type: the exception type the date should have. Defaults to
        _EXCEPTION_TYPE_ADD

    Returns:
      True iff this service has a service exception of the specified type
      at the given date.
    """
    try:
        stored_type = self.date_exceptions[date][0]
    except KeyError:
        # No exception recorded for this date at all.
        return False
    return exception_type == stored_type
|
def query_nds2(cls, name, host=None, port=None, connection=None, type=None):
    """Query an NDS server for channel information

    Parameters
    ----------
    name : `str`
        name of requested channel
    host : `str`, optional
        name of NDS2 server.
    port : `int`, optional
        port number for NDS2 connection
    connection : `nds2.connection`
        open connection to use for query
    type : `str`, `int`
        NDS2 channel type with which to restrict query

    Returns
    -------
    channel : `Channel`
        channel with metadata retrieved from NDS2 server

    Raises
    ------
    ValueError
        if multiple channels are found for a given name

    Notes
    -----
    .. warning::

       A `host` is required if an open `connection` is not given
    """
    # Delegate to the batch query with a single-element list and
    # unwrap the unique match (unique=True enforces exactly one result).
    return ChannelList.query_nds2([name], host=host, port=port, connection=connection, type=type, unique=True)[0]
|
def sort_diclist(undecorated, sort_on):
    """Sort a list of dictionaries by each dictionary's value for ``sort_on``.

    Sized values (anything exposing ``__len__``) are ordered by their
    length; all other values are compared directly.  Ties preserve the
    original list order.

    Parameters
    ----------
    undecorated : list of dicts
    sort_on : str, numeric
        key that is present in all dicts to sort on

    Returns
    -------
    ordered list of dicts

    Examples
    --------
    >>> lst = [{'key1': 10, 'key2': 2}, {'key1': 1, 'key2': 20}]
    >>> sort_diclist(lst, 'key1')
    [{'key2': 20, 'key1': 1}, {'key2': 2, 'key1': 10}]
    >>> sort_diclist(lst, 'key2')
    [{'key2': 2, 'key1': 10}, {'key2': 20, 'key1': 1}]
    """
    def sort_key(indexed):
        position, entry = indexed
        value = entry[sort_on]
        if hasattr(value, '__len__'):
            value = len(value)
        # Include the position so equal values keep their original order.
        return (value, position)

    ordered = sorted(enumerate(undecorated), key=sort_key)
    return [entry for _, entry in ordered]
|
def get_staged_files():
    """Return all files staged for the current git commit."""
    git_status = subprocess.Popen(('git', 'status', '--porcelain'),
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
    output, _ = git_status.communicate()
    # modified_re extracts the staged entries from the porcelain listing.
    return modified_re.findall(output)
|
def children(self, parent):
    """Return set of children of parent.

    Parameters
    ----------
    parent : :class:`katcp.Sensor` object
        Parent whose children to return.

    Returns
    -------
    children : set of :class:`katcp.Sensor` objects
        The child sensors of parent.
    """
    try:
        child_set = self._parent_to_children[parent]
    except KeyError:
        raise ValueError("Parent sensor %r not in tree." % parent)
    # Copy so callers cannot mutate the tree's internal set.
    return child_set.copy()
|
def publish(self):
    '''Convert the current cmdvel data to a Twist message and publish it.'''
    # Hold the lock only while reading the shared data.  ``with`` guarantees
    # the lock is released even if cmdvel2Twist raises — the original
    # acquire()/release() pair would leave the lock held on error.
    with self.lock:
        tw = cmdvel2Twist(self.data)
    # Publish outside the lock to keep the critical section minimal.
    self.pub.publish(tw)
|
def is_plugin(plugin):
    """Return True when ``plugin`` implements the ``Plugin`` interface.

    :param plugin: The plugin (instance or class) to check.
    :returns: True if plugin, False otherwise.
    :rtype: bool
    """
    try:
        if isinstance(plugin, Plugin):
            return True
        return issubclass(plugin, Plugin)
    except TypeError:
        # issubclass() rejects arguments that are not classes.
        return False
|
def get(self, *args, **kwargs):
    """The base activation logic; subclasses should leave this method
    alone and implement activate(), which is called from this
    method."""
    try:
        activated_user = self.activate(*args, **kwargs)
    except ActivationError as exc:
        # Activation failed: render the page with the error details.
        context = self.get_context_data()
        context.update({
            'activation_error': {
                'message': exc.message,
                'code': exc.code,
                'params': exc.params,
            }
        })
        return self.render_to_response(context)

    # Success: notify listeners, then redirect to the success URL.
    signals.user_activated.send(sender=self.__class__,
                                user=activated_user,
                                request=self.request)
    return HttpResponseRedirect(force_text(self.get_success_url(activated_user)))
|
def getOutputColumn(self, columnAlias):
    """Return the output Column whose alias matches ``columnAlias``,
    or None if no such column exists."""
    for candidate in self.__outputColumns:
        if candidate.getColumnAlias() == columnAlias:
            return candidate
    return None
|
def close(self, *args, **kwargs):
    """Engine closed, copy file to DB if it has changed"""
    super(DatabaseWrapper, self).close(*args, **kwargs)
    signature_version = self.settings_dict.get("SIGNATURE_VERSION", "s3v4")
    s3 = boto3.resource(
        's3',
        config=botocore.client.Config(signature_version=signature_version),
    )
    try:
        with open(self.settings_dict['NAME'], 'rb') as f:
            fb = f.read()
            # Compare the file's MD5 with the hash recorded when the
            # database was loaded; skip the upload when unchanged.
            m = hashlib.md5()
            m.update(fb)
            if self.db_hash == m.hexdigest():
                logging.debug("Database unchanged, not saving to remote DB!")
                return
            bytesIO = BytesIO()
            bytesIO.write(fb)
            bytesIO.seek(0)
            s3_object = s3.Object(self.settings_dict['BUCKET'], self.settings_dict['REMOTE_NAME'])
            # NOTE(review): the positional 'rb' looks wrong — boto3's
            # Object.put() takes keyword arguments only; confirm this call
            # actually succeeds rather than being swallowed below.
            result = s3_object.put('rb', Body=bytesIO)
    except Exception as e:
        # Best-effort upload: failures are only logged at debug level.
        logging.debug(e)
    # NOTE(review): this logs even when the upload above raised.
    logging.debug("Saved to remote DB!")
|
def move(self, fnames=None, directory=None):
    """Move files/directories

    :param fnames: paths to move; defaults to the current selection.
    :param directory: destination; when None, the user picks one in a dialog.
    """
    if fnames is None:
        fnames = self.get_selected_filenames()
    orig = fixpath(osp.dirname(fnames[0]))
    # Keep asking until the user picks a directory different from the
    # source, or cancels.
    while True:
        # Suspend stdio redirection while the native dialog is open.
        self.redirect_stdio.emit(False)
        if directory is None:
            folder = getexistingdirectory(self, _("Select directory"), orig)
        else:
            folder = directory
        self.redirect_stdio.emit(True)
        if folder:
            folder = fixpath(folder)
            if folder != orig:
                break
        else:
            # Dialog cancelled: abort the move entirely.
            return
    for fname in fnames:
        basename = osp.basename(fname)
        try:
            misc.move_file(fname, osp.join(folder, basename))
        except EnvironmentError as error:
            # Report the failure for this file but keep moving the rest.
            QMessageBox.critical(self, _("Error"), _("<b>Unable to move <i>%s</i></b>" "<br><br>Error message:<br>%s") % (basename, to_text_string(error)))
|
def flatten(iterable):
    """Fully flatten a nested iterable into its own container type.

    Strings and bytes are treated as atomic leaf values: recursing into
    them would never terminate, because a 1-character string iterates to
    itself (the original implementation hit a RecursionError on any
    string element).

    In:  flatten([1, 2, 3, 4, [5, 6, [7, 8]]])
    Out: [1, 2, 3, 4, 5, 6, 7, 8]
    """
    container = iterable.__class__
    flat = []
    for item in iterable:
        if isinstance(item, (str, bytes)):
            # Atomic: do not recurse into individual characters.
            flat.append(item)
        else:
            try:
                flat.extend(flatten(item))
            except TypeError:
                # Non-iterable leaf value.
                flat.append(item)
    return container(flat)
|
def _get_char(self, win, char):
    """Assemble one UTF-8 encoded character and return it as a string.

    ``char`` is the first byte already read from the curses window
    ``win``; any continuation bytes (UTF-8 sequences are 1-4 bytes long)
    are pulled from the window as needed.
    """
    def get_check_next_byte():
        # UTF-8 continuation bytes must be 0b10xxxxxx, i.e. 128-191.
        char = win.getch()
        if 128 <= char <= 191:
            return char
        else:
            raise UnicodeError
    # NOTE: this local shadows the builtin ``bytes``.
    bytes = []
    if char <= 127:
        # 1 byte (ASCII)
        bytes.append(char)
    # elif 194 <= char <= 223:
    elif 192 <= char <= 223:
        # 2 bytes
        bytes.append(char)
        bytes.append(get_check_next_byte())
    elif 224 <= char <= 239:
        # 3 bytes
        bytes.append(char)
        bytes.append(get_check_next_byte())
        bytes.append(get_check_next_byte())
    elif 240 <= char <= 244:
        # 4 bytes
        bytes.append(char)
        bytes.append(get_check_next_byte())
        bytes.append(get_check_next_byte())
        bytes.append(get_check_next_byte())
    # print('bytes = {}'.format(bytes))
    """no zero byte allowed"""
    # Strip NUL bytes before decoding.
    while 0 in bytes:
        bytes.remove(0)
    if version_info < (3, 0):
        # Python 2: join the raw byte values into a str.
        out = ''.join([chr(b) for b in bytes])
    else:
        buf = bytearray(bytes)
        out = self._decode_string(buf)
        # out = buf.decode('utf-8')
    return out
|
def mp_spawn(self):
    """Spawn the configured number of worker processes (multiprocessing)
    and block until they all finish."""
    workers = []
    for _ in range(self.queue_worker_amount):
        worker = multiprocessing.Process(target=self.mp_worker)
        worker.start()
        workers.append(worker)
    # Wait for every worker to terminate.
    for worker in workers:
        worker.join()
|
def cosh(x):
    """Hyperbolic cosine, propagating uncertainty for UncertainFunction
    inputs and falling back to plain ``np.cosh`` otherwise."""
    if not isinstance(x, UncertainFunction):
        return np.cosh(x)
    # Apply cosh to each Monte-Carlo sample point.
    return UncertainFunction(np.cosh(x._mcpts))
|
def has_edge(self, node1_name, node2_name, account_for_direction=True):
    """Proxies a call to the __has_edge method.

    :param node1_name: name of the edge's first node
    :param node2_name: name of the edge's second node
    :param account_for_direction: whether edge direction should matter
    :return: whatever ``__has_edge`` returns (presumably a bool — confirm)
    """
    return self.__has_edge(node1_name=node1_name, node2_name=node2_name, account_for_direction=account_for_direction)
|
def _sort_layers(self):
    """Re-key the layer mapping in ascending depth order."""
    by_depth = sorted(self._layers.items(), key=lambda entry: entry[0])
    self._layers = OrderedDict(by_depth)
|
def percent_point(self, y, V):
    """Compute the inverse of the conditional cumulative distribution
    :math:`C(u|v)^-1`.

    Args:
        y: `np.ndarray` value of :math:`C(u|v)`.
        V: `np.ndarray` given value of v.
    """
    self.check_fit()

    theta = self.theta
    if theta < 0:
        # Negative dependence parameter: the inverse degenerates to v.
        return V

    y_term = np.power(y, theta / (-1 - theta))
    v_term = np.power(V, theta)
    return np.power((y_term + v_term - 1) / v_term, -1 / theta)
|
def get_existing_item(self, item):
    """Look up ``item`` in the remote service based on its keys.

    :param item: D4S2Item whose keys identify the record to fetch.
    :return: requests.Response containing the successful result.
    """
    query = {
        'project_id': item.project_id,
        'from_user_id': item.from_user_id,
        'to_user_id': item.to_user_id,
    }
    response = requests.get(self.make_url(item.destination),
                            headers=self.json_headers,
                            params=query)
    # Raises on error responses before handing the result back.
    self.check_response(response)
    return response
|
def add_leverage(self):
    """Adds leverage term to the model

    Returns
    ----------
    None (changes instance attributes)
    """
    if self.leverage is True:
        # Leverage already enabled; nothing to do.
        pass
    else:
        self.leverage = True
        self.z_no += 1
        # Drop the tail latent variables so they can be re-added below with
        # the leverage term inserted in the correct position.
        for i in range(len(self.X_names)*2 + 3):
            self.latent_variables.z_list.pop()
        for parm in range(len(self.X_names)):
            self.latent_variables.add_z('Vol Beta ' + self.X_names[parm], fam.Normal(0, 10, transform=None), fam.Normal(0, 3))
        for parm in range(len(self.X_names)):
            self.latent_variables.add_z('Returns Beta ' + self.X_names[parm], fam.Normal(0, 10, transform=None), fam.Normal(0, 3))
        self.latent_variables.add_z('Leverage Term', fam.Flat(transform=None), fam.Normal(0, 3))
        self.latent_variables.add_z('v', fam.Flat(transform='exp'), fam.Normal(0, 3))
        self.latent_variables.add_z('Returns Constant', fam.Normal(0, 3, transform=None), fam.Normal(0, 3))
        self.latent_variables.add_z('GARCH-M', fam.Normal(0, 3, transform=None), fam.Normal(0, 3))
        # Starting value for the 'v' latent variable (third from the end).
        self.latent_variables.z_list[-3].start = 2.0
|
def get_repository_nodes(self, repository_id, ancestor_levels, descendant_levels, include_siblings):
    """Gets a portion of the hierarchy for the given repository.

    arg:    repository_id (osid.id.Id): the ``Id`` to query
    arg:    ancestor_levels (cardinal): the maximum number of
            ancestor levels to include. A value of 0 returns no
            parents in the node.
    arg:    descendant_levels (cardinal): the maximum number of
            descendant levels to include. A value of 0 returns no
            children in the node.
    arg:    include_siblings (boolean): ``true`` to include the
            siblings of the given node, ``false`` to omit the
            siblings
    return: (osid.repository.RepositoryNode) - the specified
            repository node
    raise:  NotFound - ``repository_id`` not found
    raise:  NullArgument - ``repository_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinHierarchySession.get_bin_nodes
    # Wraps the node-id hierarchy lookup in a RepositoryNode object.
    return objects.RepositoryNode(self.get_repository_node_ids(repository_id=repository_id, ancestor_levels=ancestor_levels, descendant_levels=descendant_levels, include_siblings=include_siblings)._my_map, runtime=self._runtime, proxy=self._proxy)
|
def sub_retab(match):
    r"""Replace a run of tabs with the spaces needed to reach the next
    tab stop.

    PARAMETERS:
    match -- regex match using the re_retab pattern: group 1 is the text
             before the tabs, group 2 a consecutive string of tabs.

    A naive substitution of a fixed number of spaces would shift the
    following text; instead the padding is computed from the length of
    the preceding text so the text after the tabs lands on the column a
    real tab stop would put it on.
    """
    prefix = match.group(1)
    tab_count = len(match.group(2))
    padding = TAB_SIZE * tab_count - len(prefix) % TAB_SIZE
    return prefix + ' ' * padding
|
def on_peer_down(self, peer):
    """Peer down handler.

    Cleans up the paths in global tables that was received from this peer.

    :param peer: the peer whose session went down
    """
    LOG.debug('Cleaning obsolete paths whose source/version: %s/%s', peer.ip_address, peer.version_num)
    # Launch clean-up for each global tables.
    self._table_manager.clean_stale_routes(peer)
|
def get_server_networks(self, network, public=False, private=False, key=None):
    """Creates the dict of network UUIDs required by Cloud Servers when
    creating a new server with isolated networks. By default, the UUID
    values are returned with the key of "net-id", which is what novaclient
    expects. Other tools may require different values, such as 'uuid'. If
    that is the case, pass the desired key as the 'key' parameter.

    By default only this network is included. If you wish to create a
    server that has either the public (internet) or private (ServiceNet)
    networks, you have to pass those parameters in with values of True.
    """
    # Delegate to the module-level helper that builds the dict.
    return _get_server_networks(network, public=public, private=private, key=key)
|
def __add_delayed_assert_failure(self):
    """Add a delayed_assert failure into a list for future processing."""
    current_url = self.driver.current_url
    message = self.__get_exception_message()
    # Record the check number and the page URL alongside the failure message.
    self.__delayed_assert_failures.append("CHECK #%s: (%s)\n %s" % (self.__delayed_assert_count, current_url, message))
|
def run(xmin, ymin, xmax, ymax, step, range_, range_x, range_y, t):
    """Accumulate an inverse-distance-weighted field over a grid.

    For every grid cell (i, j), the great-circle distance (spherical law
    of cosines, radius 6368 km) to each row of ``t`` (columns: two
    angular coordinates plus a weight) is computed; rows closer than
    ``range_`` contribute ``weight / (1 + distance)`` to the cell.

    NOTE(review): uses ``xrange`` and a string OpenMP pragma — this looks
    like Python 2 / Pythran kernel code; confirm the intended toolchain
    before modernising.
    """
    pt = numpy.zeros((range_x, range_y))
    "omp parallel for private(i, j, k, tmp)"
    for i in xrange(range_x):
        for j in xrange(range_y):
            xi, yj = xmin + step * i, ymin + step * j
            for k in xrange(t.shape[0]):
                tmp = 6368. * math.acos(math.cos(xi) * math.cos(t[k, 0]) * math.cos((yj) - t[k, 1]) + math.sin(xi) * math.sin(t[k, 0]))
                if tmp < range_:
                    pt[i, j] += t[k, 2] / (1 + tmp)
    return pt
|
def modify_model_backprop(model, backprop_modifier):
    """Creates a copy of model by modifying all activations to use a custom op to modify the backprop behavior.

    Args:
        model: The `keras.models.Model` instance.
        backprop_modifier: One of `{'guided', 'rectified'}`

    Returns:
        A copy of model with modified activations for backwards pass.

    Raises:
        ValueError: If `backprop_modifier` is not a registered modifier.
    """
    # The general strategy is as follows:
    # - Save original model so that upstream callers don't see unexpected results with their models.
    # - Call backend specific function that registers the custom op and loads the model under modified context manager.
    # - Maintain cache to save this expensive process on subsequent calls.
    # - Load model with custom context modifying backprop behavior.
    # The reason for this round about way is because the graph needs to be rebuilt when any of its layer builder
    # functions are changed. This is very complicated to do in Keras and makes the implementation very tightly bound
    # with keras internals. By saving and loading models, we dont have to worry about future compatibility.
    # The only exception to this is the way advanced activations are handled which makes use of some keras internal
    # knowledge and might break in the future.
    # ADD on 22 Jul 2018:
    # In fact, it has broken. Currently, advanced activations are not supported.

    # 0. Retrieve from cache if previously computed.
    modified_model = _MODIFIED_MODEL_CACHE.get((model, backprop_modifier))
    if modified_model is not None:
        return modified_model

    # Serialize to a uniquely named temp file (removed in the finally block).
    model_path = os.path.join(tempfile.gettempdir(), next(tempfile._get_candidate_names()) + '.h5')
    try:
        # 1. Save original model
        model.save(model_path)

        # 2. Register modifier and load modified model under custom context.
        modifier_fn = _BACKPROP_MODIFIERS.get(backprop_modifier)
        if modifier_fn is None:
            raise ValueError("'{}' modifier is not supported".format(backprop_modifier))
        modifier_fn(backprop_modifier)

        # 3. Create graph under custom context manager.
        with tf.get_default_graph().gradient_override_map({'Relu': backprop_modifier}):
            # This should rebuild graph with modifications.
            modified_model = load_model(model_path)

            # Cache to improve subsequent call performance.
            _MODIFIED_MODEL_CACHE[(model, backprop_modifier)] = modified_model
            return modified_model
    finally:
        # Always clean up the temp file, even when loading fails.
        os.remove(model_path)
|
def create_source(self, datapusher=True):
    """Populate ckan directory from preloaded image and copy
    who.ini and schema.xml info conf directory

    :param datapusher: when True, include the datapusher in the created source.
    """
    # Delegate to the task helper, preloading the image first.
    task.create_source(self.target, self._preload_image(), datapusher)
|
def getSettingsPath():
    """Return the directory where pyGeno settings are stored.

    Reads the ``settings_dir`` option from ``config.ini`` inside the
    pyGeno settings directory.  If the file is missing or malformed, a
    default config file is created and the lookup is retried.
    """
    parser = SafeConfigParser()
    try:
        parser.read(os.path.normpath(pyGeno_SETTINGS_DIR + '/config.ini'))
        return parser.get('pyGeno_config', 'settings_dir')
    except Exception:
        # The original bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; limit the retry to ordinary errors
        # (missing file, section or option).
        createDefaultConfigFile()
        return getSettingsPath()
|
def post_op(self, id: str, path_data: Union[dict, None], post_data: Any) -> dict:
    """Modifies the ESI by looking up an operation id.

    Args:
        id: ESI operation id, resolved to a raw URL path
        path_data: data to format the path with (can be None)
        post_data: data to send to ESI

    Returns:
        ESI data
    """
    # NOTE: ``id`` shadows the builtin; kept for interface compatibility.
    path = self._get_path_for_op_id(id)
    return self.post_path(path, path_data, post_data)
|
def Connect(self, Username, WaitConnected=False):
    """Connects application to user.

    :Parameters:
      Username : str
        Name of the user to connect to.
      WaitConnected : bool
        If True, causes the method to wait until the connection is established.

    :return: If ``WaitConnected`` is True, returns the stream which can be used to send the
             data. Otherwise returns None.
    :rtype: `ApplicationStream` or None
    """
    if WaitConnected:
        # One-shot event plus scratch fields that the stream handler fills in.
        self._Connect_Event = threading.Event()
        self._Connect_Stream = [None]
        self._Connect_Username = Username
        # Prime the handler with the currently known streams, then listen
        # for new ones.
        self._Connect_ApplicationStreams(self, self.Streams)
        self._Owner.RegisterEventHandler('ApplicationStreams', self._Connect_ApplicationStreams)
        self._Alter('CONNECT', Username)
        # Block until the handler signals that the stream is connected.
        self._Connect_Event.wait()
        self._Owner.UnregisterEventHandler('ApplicationStreams', self._Connect_ApplicationStreams)
        try:
            return self._Connect_Stream[0]
        finally:
            # Always drop the scratch fields, even if returning raises.
            del self._Connect_Stream, self._Connect_Event, self._Connect_Username
    else:
        # Fire-and-forget connect request.
        self._Alter('CONNECT', Username)
|
def _find_ssh_exe():
    '''Windows only: locate Git's bundled ssh.exe in its known install paths.'''
    system_drive = os.getenv('SystemDrive')
    # Known locations for Git's ssh.exe in Windows.
    candidates = [
        os.path.join(system_drive, os.sep, 'Program Files*', 'Git', 'usr', 'bin', 'ssh.exe'),
        os.path.join(system_drive, os.sep, 'Program Files*', 'Git', 'bin', 'ssh.exe'),
    ]
    for pattern in candidates:
        matches = glob.glob(pattern)
        if matches and os.path.isfile(matches[0]):
            return matches[0]
    return None
|
def parse(path):
    """Parse URL path and convert it to regexp if needed.

    Returns the path unchanged when it contains only plain tokens;
    otherwise strips anchors and compiles a regex with a named group for
    each dynamic segment.

    NOTE(review): ``re.sre_parse`` is a private CPython module (relocated
    in newer Python versions) — confirm the supported interpreter range.
    """
    parsed = re.sre_parse.parse(path)
    # If every token is a literal (or '.'), the path needs no regex.
    for case, _ in parsed:
        if case not in (re.sre_parse.LITERAL, re.sre_parse.ANY):
            break
    else:
        return path
    path = path.strip('^$')

    def parse_(match):
        # Turn one dynamic segment into a named capturing group, using the
        # declared sub-pattern or a default "anything but / { }" matcher.
        [part] = match.groups()
        match = DYNR_RE.match(part)
        params = match.groupdict()
        return '(?P<%s>%s)' % (params['var'], params['re'] or '[^{}/]+')

    return re.compile('^%s$' % DYNS_RE.sub(parse_, path))
|
def _parse_lsb_release_content(lines):
    """Parse the output of the lsb_release command.

    Parameters:

    * lines: Iterable through the lines of the lsb_release output.
             Each line must be a unicode string or a UTF-8 encoded byte
             string.

    Returns:
        A dictionary containing all information items.
    """
    props = {}
    for line in lines:
        key, sep, value = line.strip('\n').partition(':')
        if not sep:
            # Skip lines without a colon separator.
            continue
        props[key.replace(' ', '_').lower()] = value.strip()
    return props
|
def cropped(self, t0, t1):
    """returns a cropped copy of this segment which starts at
    self.point(t0) and ends at self.point(t1).

    :param t0: parameter value of the new start point
    :param t1: parameter value of the new end point
    :return: a new Line between the two interpolated points
    """
    return Line(self.point(t0), self.point(t1))
|
def runProcess(cmd, *args):
    """Run `cmd` (which is searched for in the executable path) with `args` and
    return the exit status.

    In general (unless you know what you're doing) use::

        runProcess('program', filename)

    rather than::

        os.system('program %s' % filename)

    because the latter will not work as expected if `filename` contains
    spaces or shell-metacharacters.

    If you need more fine-grained control look at ``os.spawn*``.
    """
    # NOTE(review): os.spawnvp is POSIX-only (absent on Windows) — confirm
    # the intended supported platforms.
    from os import spawnvp, P_WAIT
    # argv[0] is conventionally the command name itself.
    return spawnvp(P_WAIT, cmd, (cmd,) + args)
|
def execution_engine_model_changed(self, model, prop_name, info):
    """High light active state machine."""
    notebook = self.view['notebook']
    active_state_machine_id = self.model.state_machine_manager.active_state_machine_id
    if active_state_machine_id is None:
        # un-mark all state machine that are marked with execution-running style class
        for tab in self.tabs.values():
            # The tab label widget is the first child of the label container.
            label = notebook.get_tab_label(tab['page']).get_child().get_children()[0]
            if label.get_style_context().has_class(constants.execution_running_style_class):
                label.get_style_context().remove_class(constants.execution_running_style_class)
    else:
        # mark active state machine with execution-running style class
        page = self.get_page_for_state_machine_id(active_state_machine_id)
        if page:
            label = notebook.get_tab_label(page).get_child().get_children()[0]
            label.get_style_context().add_class(constants.execution_running_style_class)
|
def add(self, rule: ControlRule = None, *, supply: float):
    """Register a new rule above a given ``supply`` threshold.

    Supports two call forms: pass the rule directly
    (``control.add(my_rule, supply=100)``), or omit it to get back a
    decorator (``@control.add(supply=10)``) for ``def``/``class``
    definitions.

    :param rule: the rule callable, or ``None`` for decorator usage
    :param supply: threshold above which the rule applies
    :raises ValueError: if a rule is already registered for ``supply``
    :return: ``rule`` itself, or a decorator when ``rule`` is ``None``
    """
    if supply in self._thresholds:
        raise ValueError('rule for threshold %s re-defined' % supply)
    if rule is None:
        # decorator form: defer registration until the rule is supplied
        return partial(self.add, supply=supply)
    self.rules.append((supply, rule))
    self._thresholds.add(supply)
    return rule
|
def mac_address_table_static_vlanid ( self , ** kwargs ) :
"""Auto Generated Code"""
|
config = ET . Element ( "config" )
mac_address_table = ET . SubElement ( config , "mac-address-table" , xmlns = "urn:brocade.com:mgmt:brocade-mac-address-table" )
static = ET . SubElement ( mac_address_table , "static" )
mac_address_key = ET . SubElement ( static , "mac-address" )
mac_address_key . text = kwargs . pop ( 'mac_address' )
forward_key = ET . SubElement ( static , "forward" )
forward_key . text = kwargs . pop ( 'forward' )
interface_type_key = ET . SubElement ( static , "interface-type" )
interface_type_key . text = kwargs . pop ( 'interface_type' )
interface_name_key = ET . SubElement ( static , "interface-name" )
interface_name_key . text = kwargs . pop ( 'interface_name' )
vlan_key = ET . SubElement ( static , "vlan" )
vlan_key . text = kwargs . pop ( 'vlan' )
vlanid = ET . SubElement ( static , "vlanid" )
vlanid . text = kwargs . pop ( 'vlanid' )
callback = kwargs . pop ( 'callback' , self . _callback )
return callback ( config )
|
def highest_expr_genes(adata, n_top=30, show=None, save=None, ax=None, gene_symbols=None, **kwds):
    """Fraction of counts assigned to each gene over all cells.

    Computes, for each gene, the fraction of counts assigned to that gene
    within a cell. The `n_top` genes with the highest mean fraction over all
    cells are plotted as boxplots.

    This plot is similar to the `scater` package function `plotHighestExprs(type
    = "highest-expression")`.

    Parameters
    ----------
    adata : :class:`~anndata.AnnData`
        Annotated data matrix.
    n_top : `int`, optional (default: 30)
        Number of top genes to plot.
    {show_save_ax}
    gene_symbols : `str`, optional (default: None)
        Key for field in .var that stores gene symbols if you do not want to
        use .var_names.
    **kwds : keyword arguments
        Are passed to `seaborn.boxplot`.

    Returns
    -------
    If `show == False` a :class:`~matplotlib.axes.Axes`.
    """
    from scipy.sparse import issparse
    # compute the percentage of each gene per cell
    dat = normalize_per_cell(adata, counts_per_cell_after=100, copy=True)
    # identify the genes with the highest mean
    if issparse(dat.X):
        dat.var['mean_percent'] = dat.X.mean(axis=0).A1
    else:
        dat.var['mean_percent'] = dat.X.mean(axis=0)
    top = dat.var.sort_values('mean_percent', ascending=False).index[:n_top]
    dat = dat[:, top]
    columns = dat.var_names if gene_symbols is None else dat.var[gene_symbols]
    # BUG FIX: `.toarray()` exists only on scipy sparse matrices; a dense
    # `X` (plain ndarray, explicitly supported by the branch above) would
    # raise AttributeError if passed to `.toarray()` unconditionally.
    matrix = dat.X.toarray() if issparse(dat.X) else dat.X
    dat = pd.DataFrame(matrix, index=dat.obs_names, columns=columns)
    if not ax:
        # figsize is hardcoded to produce a tall image. To change the fig size,
        # a matplotlib.axes.Axes object needs to be passed.
        height = (n_top * 0.2) + 1.5
        fig, ax = plt.subplots(figsize=(5, height))
    sns.boxplot(data=dat, orient='h', ax=ax, fliersize=1, **kwds)
    ax.set_xlabel('% of total counts')
    utils.savefig_or_show('highest_expr_genes', show=show, save=save)
    return ax if show == False else None
|
def _get_django_sites():
    """Get a list of sites as dictionaries {site_id: 'domain.name'}

    Populates and returns the cached ``env.sites`` mapping by running
    ``django-admin.py dumpdata sites`` on the remote node (fabric).
    Only queries the node when the cache is empty, the project uses
    ``django.contrib.sites``, and the project is deployed.

    NOTE: Python 2 code (print statements).
    """
    deployed = version_state('deploy_project')
    if not env.sites and 'django.contrib.sites' in env.INSTALLED_APPS and deployed:
        # run from the remote sitesettings directory inside the virtualenv
        with cd('/'.join([deployment_root(), 'env', env.project_fullname, 'project', env.project_package_name, 'sitesettings'])):
            venv = '/'.join([deployment_root(), 'env', env.project_fullname, 'bin', 'activate'])
            # since this is the first time we run ./manage.py on the server it can be
            # a point of failure for installations
            with settings(warn_only=True):
                output = run(' '.join(['source', venv, '&&', "django-admin.py dumpdata sites --settings=%s.sitesettings.settings" % env.project_package_name]))
                if output.failed:
                    print "ERROR: There was an error running ./manage.py on the node"
                    print "See the troubleshooting docs for hints on how to diagnose deployment issues"
                    if hasattr(output, 'stderr'):
                        print output.stderr
                    sys.exit(1)
            # ignore any lines prior to the data being dumped
            output = output.split('\n')[-1]
            sites = json.loads(output)
            env.sites = {}
            for s in sites:
                env.sites[s['pk']] = s['fields']['domain']
    return env.sites
|
def _encode(self):
    """Serialize this message into a bytestring.

    Messages without a ``__fields__`` declaration encode to an empty
    buffer; otherwise each field writes itself into the buffer in order.
    """
    buf = ByteBuffer()
    for field in getattr(self, '__fields__', ()):
        field.encode(self, buf)
    return buf.tostring()
|
def get_credentials():
    """Get the credentials to use. We try application credentials first, followed by
    user credentials. The path to the application credentials can be overridden
    by pointing the GOOGLE_APPLICATION_CREDENTIALS environment variable to some file;
    the path to the user credentials can be overridden by pointing the CLOUDSDK_CONFIG
    environment variable to some directory (after which we will look for the file
    $CLOUDSDK_CONFIG/gcloud/credentials). Unless you have specific reasons for
    overriding these the defaults should suffice.
    """
    try:
        # Application Default Credentials first.
        credentials, _ = google.auth.default()
        credentials = google.auth.credentials.with_scopes_if_required(credentials, CREDENTIAL_SCOPES)
        return credentials
    except Exception as e:
        # Broad except is deliberate: any ADC failure falls back to the
        # legacy gcloud user-credentials file. Try load user creds from file.
        cred_file = get_config_dir() + '/credentials'
        if os.path.exists(cred_file):
            with open(cred_file) as f:
                creds = json.loads(f.read())
            # Use the first gcloud one we find
            for entry in creds['data']:
                if entry['key']['type'] == 'google-cloud-sdk':
                    # Convert the legacy oauth2client credential to the
                    # google.auth flavour before returning.
                    creds = oauth2client.client.OAuth2Credentials.from_json(json.dumps(entry['credential']))
                    return _convert_oauth2client_creds(creds)
        if type(e) == google.auth.exceptions.DefaultCredentialsError:
            # If we are in Datalab container, change the message to be about signing in.
            if _in_datalab_docker():
                raise Exception('No application credentials found. Perhaps you should sign in.')
        # Re-raise the original failure when no fallback applied.
        raise e
|
def datasetScalarTimeStepChunk(lines, numberColumns, numberCells):
    """Process the time step chunks for scalar datasets.

    :param lines: chunk lines; the first line is the time-step header
        (``TS <iStatus> <timestamp>`` — presumably; confirm against the
        GSSHA dataset format), followed by one value per line.
    :param numberColumns: number of raster columns used to fold the flat
        value list into a nested-list string.
    :param numberCells: number of status-indicator lines preceding the
        data values when ``iStatus`` is non-zero.
    :return: dict with keys ``iStatus``, ``timestamp``, ``cellArray``
        (stringified nested list) and ``rasterText`` (raw value lines).
    """
    END_DATASET_TAG = 'ENDDS'
    # Define the result object
    result = {'iStatus': None, 'timestamp': None, 'cellArray': None, 'rasterText': None}
    # Split the chunks
    timeStep = pt.splitLine(lines.pop(0))
    # Extract cells, ignoring the status indicators
    startCellsIndex = numberCells
    # Handle case when status cells are not included (istat = 0)
    iStatus = int(timeStep[1])
    if iStatus == 0:
        startCellsIndex = 0
    # Strip off ending dataset tag
    if END_DATASET_TAG in lines[-1]:
        lines.pop(-1)
    # Assemble the array string
    arrayString = '[['
    columnCounter = 1
    lenLines = len(lines) - 1
    # Also assemble raster text field to preserve for spatial datasets
    rasterText = ''
    for index in range(startCellsIndex, len(lines)):
        # Check columns condition: continue the current row, close the row
        # and open the next, or close the whole nested list on the last line.
        if columnCounter % numberColumns != 0 and index != lenLines:
            arrayString += lines[index].strip() + ', '
        elif columnCounter % numberColumns == 0 and index != lenLines:
            arrayString += lines[index].strip() + '], ['
        elif index == lenLines:
            arrayString += lines[index].strip() + ']]'
        # Advance counter
        columnCounter += 1
        # rasterText keeps the original (unstripped) lines verbatim
        rasterText += lines[index]
    # Get Value Array
    result['cellArray'] = arrayString
    result['rasterText'] = rasterText
    # Assign Result
    result['iStatus'] = iStatus
    result['timestamp'] = float(timeStep[2])
    return result
|
def uncshare(self):
    """The UNC mount point for this path.

    This is empty for paths on local drives.
    """
    share, _rest = self.module.splitunc(self)
    return self._next_class(share)
|
def error(self, msg, indent=0, **kwargs):
    """Log ``msg`` via ``self.logger.error`` after applying ``indent``.

    Extra keyword arguments are forwarded to the logger call; the
    logger's return value is passed back to the caller.
    """
    indented = self._indent(msg, indent)
    return self.logger.error(indented, **kwargs)
|
def X_less(self):
    """Show half as much time on the x-axis (halves 'window_length').

    NOTE(review): the original docstring said "Zoom out", but halving the
    window length displays a shorter time span, i.e. zooms in — confirm
    against the sibling X_more handler.
    """
    self.parent.value('window_length', self.parent.value('window_length') / 2)
    self.parent.overview.update_position()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.