signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def insertDataset(self, dataset):
    """Insert the specified dataset into this repository.

    Any failure during creation (typically a unique-constraint violation)
    is surfaced as a DuplicateNameException for the dataset's local id.
    """
    try:
        models.Dataset.create(
            id=dataset.getId(),
            name=dataset.getLocalId(),
            description=dataset.getDescription(),
            attributes=json.dumps(dataset.getAttributes()),
        )
    except Exception:
        raise exceptions.DuplicateNameException(dataset.getLocalId())
|
def recenter(positions):
    '''Returns a list of new positions centered around the origin.'''
    (x0, y0, z0), (x1, y1, z1) = bounding_box(positions)
    # Midpoint of the bounding box on each axis becomes the new origin.
    cx = x1 - (x1 - x0) / 2.0
    cy = y1 - (y1 - y0) / 2.0
    cz = z1 - (z1 - z0) / 2.0
    return [(x - cx, y - cy, z - cz) for x, y, z in positions]
|
def versions_request(self):
    """List Available REST API Versions"""
    response = self.handle_api_exceptions('GET', '', api_ver='')
    return [str_dict(entry) for entry in response.json()]
|
def afw_nonemptiness_check(afw: dict) -> bool:
    """Checks if the input AFW reads any language other than the
    empty one, returning True/False.

    The AFW is translated into an NFA and the NFA's nonemptiness
    is then checked.

    :param dict afw: input AFW.
    :return: *(bool)*, True if input afw is nonempty, False otherwise.
    """
    equivalent_nfa = afw_to_nfa_conversion(afw)
    return NFA.nfa_nonemptiness_check(equivalent_nfa)
|
def _special_value_autocomplete(em):
    '''Handle the "autocomplete" property, which has different behaviour for form vs input.'''
    if em.tagName == 'form':
        # Forms default to "on"; an empty value is treated as invalid.
        raw = em.getAttribute('autocomplete', 'on')
        return convertPossibleValues(raw, POSSIBLE_VALUES_ON_OFF, invalidDefault='on', emptyValue=EMPTY_IS_INVALID)
    # Inputs default to the empty string and keep empty values as-is.
    raw = em.getAttribute('autocomplete', '')
    return convertPossibleValues(raw, POSSIBLE_VALUES_ON_OFF, invalidDefault="", emptyValue='')
|
def stats(self):
    """The current status of the positions.

    Returns
    -------
    stats : PositionStats
        The current position stats.

    Notes
    -----
    This is cached; repeated access will not recompute the stats until
    the stats may have changed.
    """
    if not self._dirty_stats:
        return self._stats
    # Recompute in place, then mark the cache as clean.
    calculate_position_tracker_stats(self.positions, self._stats)
    self._dirty_stats = False
    return self._stats
|
def validate_row_lengths(fields,  # type: Sequence[FieldSpec]
                         data  # type: Sequence[Sequence[str]]
                         ):  # type: (...) -> None
    """Validate the `data` row lengths according to the specification
    in `fields`.

    :param fields: The `FieldSpec` objects forming the specification.
    :param data: The rows to check.
    :raises FormatError: When the number of entries in a row does
        not match expectation.
    """
    expected = len(fields)
    for index, row in enumerate(data):
        if len(row) != expected:
            msg = 'Row {} has {} entries when {} are expected.'.format(
                index, len(row), expected)
            raise FormatError(msg)
|
def find_model_dat():
    """Find the file containing the definition of all the models in Xspec
    (model.dat) and return its path.

    The file is resolved as ``$HEADAS/../spectral/manager/model.dat``.

    NOTE(review): validation here relies on ``assert``, which is stripped
    under ``python -O``; consider explicit exceptions for hard requirements.
    """
    # model.dat is in $HEADAS/../spectral
    headas_env = os.environ.get("HEADAS")
    assert headas_env is not None, ("You need to setup the HEADAS variable before importing this module." " See Heasoft documentation.")
    # Expand all variables and other things like ~
    headas_env = os.path.expandvars(os.path.expanduser(headas_env))
    # Lazy check that it exists
    assert os.path.exists(headas_env), "The HEADAS env. variable point to a non-existent directory: %s" % (headas_env)
    # Get one directory above HEADAS (i.e., $HEADAS/..)
    inferred_path = os.path.dirname(headas_env)
    # Now model.dat should be in $HEADAS/../spectral/manager
    final_path = os.path.join(inferred_path, 'spectral', 'manager', 'model.dat')
    # Check that model.dat exists
    assert os.path.exists(final_path), "Cannot find Xspec model definition file %s" % (final_path)
    return os.path.abspath(final_path)
|
def signature(secret, parts):
    """Generate an HMAC-SHA1 signature over ``parts`` keyed by ``secret``.

    All strings are assumed to be UTF-8; ``str`` values are encoded
    before being fed to the HMAC.

    :param secret: HMAC key (``bytes`` or ``str``).
    :param parts: iterable of ``bytes``/``str`` chunks, signed in order.
    :return: hexadecimal digest string.
    """
    # Fix: the old Python<2.5 fallback branch referenced an undefined
    # ``sha`` name; it is dead on any supported interpreter and has been
    # removed, along with the unnecessary ``six`` indirection
    # (six.binary_type is ``bytes`` on Python 3).
    if not isinstance(secret, bytes):
        secret = secret.encode('utf-8')
    csum = hmac.new(secret, digestmod=hashlib.sha1)
    for part in parts:
        if not isinstance(part, bytes):
            part = part.encode('utf-8')
        csum.update(part)
    return csum.hexdigest()
|
def deleteDenylistAddress(self, *args, **kwargs):
    """Delete Denylisted Address

    Delete the specified address from the notification denylist.

    This method takes input: ``v1/notification-address.json#``

    This method is ``experimental``
    """
    func = self.funcinfo["deleteDenylistAddress"]
    return self._makeApiCall(func, *args, **kwargs)
|
def registerAccountResponse(self, person, vendorSpecific=None):
    """CNIdentity.registerAccount(session, person) → Subject
    https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.registerAccount

    Args:
        person: person record; serialized to UTF-8 XML for the request body.
        vendorSpecific: optional vendor-specific request headers.

    Returns:
        The raw POST response.
    """
    fields = {'person': ('person.xml', person.toxml('utf-8'))}
    return self.POST('accounts', fields=fields, headers=vendorSpecific)
|
def _download_extract_archive(self, url):
    """Download the ipgeobase zip archive and extract its two data files.

    Returns a dict with the extracted 'cities' and 'cidr' file paths.
    """
    self.logger.info('Downloading zipfile from ipgeobase.ru...')
    extract_dir = tempfile.mkdtemp()
    archive = zipfile.ZipFile(self._download_url_to_string(url))
    self.logger.info('Extracting files...')
    # Extraction order preserved: cities first, then cidr.
    extracted = {
        'cities': archive.extract(settings.IPGEOBASE_CITIES_FILENAME, path=extract_dir),
        'cidr': archive.extract(settings.IPGEOBASE_CIDR_FILENAME, path=extract_dir),
    }
    return extracted
|
def drawdowns(returns, geometric=True):
    """Compute the drawdown series for the period return series.

    :param returns: periodic return Series or DataFrame
    :param geometric: if True, compound the returns geometrically
    :return: drawdown series with the same shape as the wealth series
    :raises ValueError: if the underlying values are not 1-D or 2-D
    """
    wealth = 1. + returns_cumulative(returns, geometric=geometric, expanding=True)
    values = wealth.values
    # Fix: the 1-D and 2-D branches previously duplicated the running-max
    # and drawdown logic verbatim; only the padding step actually differs.
    # Prepend a starting wealth of 1 so the running maximum never drops
    # below the initial investment, then drop that synthetic row again.
    if values.ndim == 2:
        ncols = values.shape[-1]
        values = np.vstack(([1.] * ncols, values))
    elif values.ndim == 1:
        values = np.hstack(([1.], values))
    else:
        raise ValueError('unable to process array with %s dimensions' % values.ndim)
    maxwealth = pd.expanding_max(values)[1:]
    dds = wealth / maxwealth - 1.
    # Clip positives: can happen if first returns are positive
    dds[dds > 0] = 0
    return dds
|
def keypress(self, event):
    """Allow keys typed in widget to select items."""
    try:
        selected = self.shortcuts[event.keysym]
    except KeyError:
        # Key has no shortcut bound (probably a stray event, since we
        # intend to receive only shortcut-key events); ignore it.
        return
    self.choice.set(selected)
|
async def connect(self, retry=2):
    """Connect to Mill.

    Logs in with the stored username/password and, on success, stores
    the session token and user id on the instance.

    :param retry: remaining reconnection attempts on timeout/client errors.
    :return: True when login succeeded, False otherwise.
    """
    # pylint: disable=too-many-return-statements
    url = API_ENDPOINT_1 + 'login'
    # Fixed protocol headers required by the Mill "X-Zc-*" API scheme.
    headers = {"Content-Type": "application/x-zc-object", "Connection": "Keep-Alive", "X-Zc-Major-Domain": "seanywell", "X-Zc-Msg-Name": "millService", "X-Zc-Sub-Domain": "milltype", "X-Zc-Seq-Id": "1", "X-Zc-Version": "1", }
    payload = {"account": self._username, "password": self._password}
    try:
        with async_timeout.timeout(self._timeout):
            resp = await self.websession.post(url, data=json.dumps(payload), headers=headers)
    except (asyncio.TimeoutError, aiohttp.ClientError):
        # Transient network failure: retry recursively until the budget
        # is exhausted, then log and give up.
        if retry < 1:
            _LOGGER.error("Error connecting to Mill", exc_info=True)
            return False
        return await self.connect(retry - 1)
    result = await resp.text()
    # The API reports auth failures inside the response body rather than
    # via HTTP status codes; match the raw error-code strings.
    if '"errorCode":3504' in result:
        _LOGGER.error('Wrong password')
        return False
    if '"errorCode":3501' in result:
        _LOGGER.error('Account does not exist')
        return False
    data = json.loads(result)
    token = data.get('token')
    if token is None:
        _LOGGER.error('No token')
        return False
    user_id = data.get('userId')
    if user_id is None:
        _LOGGER.error('No user id')
        return False
    # Persist credentials for subsequent authenticated requests.
    self._token = token
    self._user_id = user_id
    return True
|
def fbeta(y_pred: Tensor, y_true: Tensor, thresh: float = 0.2, beta: float = 2, eps: float = 1e-9, sigmoid: bool = True) -> Rank0Tensor:
    "Computes the f_beta between `preds` and `targets`"
    beta_sq = beta ** 2
    if sigmoid:
        y_pred = y_pred.sigmoid()
    # Binarize predictions at the threshold; targets cast to float.
    y_pred = (y_pred > thresh).float()
    y_true = y_true.float()
    true_pos = (y_pred * y_true).sum(dim=1)
    precision = true_pos / (y_pred.sum(dim=1) + eps)
    recall = true_pos / (y_true.sum(dim=1) + eps)
    score = (precision * recall) / (precision * beta_sq + recall + eps) * (1 + beta_sq)
    return score.mean()
|
def getScreenRGB(self, screen_data=None):
    """Fill ``screen_data`` with the current screen's RGB data.

    ``screen_data`` MUST be a numpy array of uint32/int32, e.g.::

        screen_data = np.zeros(w * h, dtype=np.uint32)

    Notice, it must be width*height in size. If it is None, this
    function will allocate it.
    """
    if screen_data is None:
        width = ale_lib.getScreenWidth(self.obj)
        # BUGFIX: the height was previously obtained via getScreenWidth,
        # which allocated a wrongly sized buffer on non-square screens.
        height = ale_lib.getScreenHeight(self.obj)
        screen_data = np.zeros(width * height, dtype=np.uint32)
    ale_lib.getScreenRGB(self.obj, as_ctypes(screen_data))
    return screen_data
|
def tree_to_nodes(tree, context=None, metadata=None):
    """Assembles ``tree`` nodes into object models.

    If ``context`` is supplied, it will be used to contextualize
    the contents of the nodes. Metadata will pass non-node identifying
    values down to child nodes, if not overridden (license, timestamps, etc).
    """
    nodes = []
    for item in tree['contents']:
        if 'contents' in item:
            # Sub-collection: recurse first, then rebuild the metadata
            # dict attached to this level's binder.
            sub_nodes = tree_to_nodes(item, context=context, metadata=metadata)
            if metadata is None:
                metadata = {}
            else:
                metadata = metadata.copy()
            # Strip identifiers inherited from the parent level so they
            # do not leak into this binder...
            for key in ('title', 'id', 'shortid', 'cnx-archive-uri', 'cnx-archive-shortid'):
                if key in metadata:
                    metadata.pop(key)
            # ...then install this item's own identifiers.
            for key in ('title', 'id', 'shortId'):
                if item.get(key):
                    metadata[key] = item[key]
                    # NOTE(review): 'subcol' appears to be a sentinel for
                    # id-less sub-collections; only real values become
                    # archive identifiers — confirm against callers.
                    if item[key] != 'subcol':
                        if key == 'id':
                            metadata['cnx-archive-uri'] = item[key]
                        elif key == 'shortId':
                            metadata['cnx-archive-shortid'] = item[key]
            titles = _title_overrides_from_tree(item)
            # A real id yields a Binder; otherwise a TranslucentBinder.
            if item.get('id') is not None:
                tbinder = cnxepub.Binder(item.get('id'), sub_nodes, metadata=metadata, title_overrides=titles)
            else:
                tbinder = cnxepub.TranslucentBinder(sub_nodes, metadata=metadata, title_overrides=titles)
            nodes.append(tbinder)
        else:
            # Leaf: build a document and copy identifiers onto it.
            doc = document_factory(item['id'], context=context)
            for key in ('title', 'id', 'shortId'):
                if item.get(key):
                    doc.metadata[key] = item[key]
                    if key == 'id':
                        doc.metadata['cnx-archive-uri'] = item[key]
                    elif key == 'shortId':
                        doc.metadata['cnx-archive-shortid'] = item[key]
            nodes.append(doc)
    return nodes
|
def unicode_body(self, ignore_errors=True, fix_special_entities=True):
    """Return response body as unicode string (computed lazily, cached)."""
    cached = self._unicode_body
    if cached:
        return cached
    self._unicode_body = self.convert_body_to_unicode(
        body=self.body,
        bom=self.bom,
        charset=self.charset,
        ignore_errors=ignore_errors,
        fix_special_entities=fix_special_entities,
    )
    return self._unicode_body
|
def calculate_checksum(self):
    """Calculate ISBN checksum.

    Returns:
        ``str``: ISBN checksum value
    """
    # 9- and 12-digit forms carry no check digit yet; otherwise strip the
    # trailing check digit before delegating to the module-level
    # ``calculate_checksum`` helper.
    digits = self.isbn if len(self.isbn) in (9, 12) else self.isbn[:-1]
    return calculate_checksum(digits)
|
def __get_query_filters(cls, filters=None, inverse=False):
    """Convert a dict with the filters to be applied ({"name1": "value1", "name2": "value2"})
    to a list of query objects which can be used together in a query using boolean
    combination logic.

    :param filters: dict with the filters to be applied
    :param inverse: if True include only the inverse filters (the ones starting with *)
    :return: a list of es_dsl 'MatchPhrase' Query objects
        Ex: [MatchPhrase(name1="value1"), MatchPhrase(name2="value2"), ..]
        Dict representation of the object: {'match_phrase': {'field': 'home'}}
    """
    # Fix: the default used to be a mutable ``{}`` — a shared-default
    # pitfall; ``None`` is now the sentinel (behavior is unchanged).
    if filters is None:
        filters = {}
    query_filters = []
    for name in filters:
        is_inverse_filter = name.startswith('*')
        # Keep only the filters matching the requested mode.
        if is_inverse_filter != inverse:
            continue
        field_name = name[1:] if is_inverse_filter else name
        # Use es_dsl objects rather than hand-built query dicts.
        query_filters.append(Q('match_phrase', **{field_name: filters[name]}))
    return query_filters
|
def mim2reg(mimfile, regfile):
    """Convert a MIMAS region (.mim) file into a DS9 region (.reg) file.

    Parameters
    ----------
    mimfile : str
        Input file in MIMAS format.
    regfile : str
        Output file.
    """
    region = Region.load(mimfile)
    region.write_reg(regfile)
    logging.info("Converted {0} -> {1}".format(mimfile, regfile))
|
def format_search(q, **kwargs):
    '''Formats the results of a search.

    :param q: the query string passed to ``search``
    :raises DapiCommError: when the search yields no packages
    :return: the formatted description of the first result, or None when
        the result list is empty despite a non-zero count
    '''
    m = search(q, **kwargs)
    if not m['count']:
        # Fix: an unreachable ``return`` used to follow this raise.
        raise DapiCommError('Could not find any DAP packages for your query.')
    # Only the first result is formatted — the loop returns on its first
    # iteration (preserved from the original behaviour).
    for mdap in m['results']:
        return _format_dap_with_description(mdap['content_object'])
|
def save(self, path_info, checksum):
    """Save checksum for the specified path info.

    Args:
        path_info (dict): path_info to save checksum for.
        checksum (str): checksum to save.
    """
    assert path_info["scheme"] == "local"
    assert checksum is not None
    path = path_info["path"]
    assert os.path.exists(path)
    mtime, size = get_mtime_and_size(path)
    inode = get_inode(path)
    # Update the existing record for this inode, or insert a new one.
    if self.get_state_record_for_inode(inode):
        self._update_state_for_path_changed(path, inode, mtime, size, checksum)
    else:
        self._insert_new_state_record(path, inode, mtime, size, checksum)
|
def notify(self, *args, **kwargs):
    "See signal"
    loop = kwargs.pop('loop', self.loop)
    notification = self.signal.prepare_notification(
        subscribers=self.subscribers, instance=self.instance, loop=loop)
    return notification.run(*args, **kwargs)
|
def umi_consensus(data):
    """Convert UMI grouped reads into fastq pair for re-alignment.

    Pipeline: fgbio GroupReadsByUmi -> (duplex or molecular) consensus
    calling -> FilterConsensusReads -> bamtofastq, producing a gzipped
    fastq pair next to the work BAM. Skipped when outputs are up to date.

    :param data: bcbio sample data dict.
    :return: (fastq1 path, fastq2 path, average raw coverage).
    """
    align_bam = dd.get_work_bam(data)
    umi_method, umi_tag = _check_umi_type(align_bam)
    f1_out = "%s-cumi-1.fq.gz" % utils.splitext_plus(align_bam)[0]
    f2_out = "%s-cumi-2.fq.gz" % utils.splitext_plus(align_bam)[0]
    avg_coverage = coverage.get_average_coverage("rawumi", dd.get_variant_regions(data), data)
    if not utils.file_uptodate(f1_out, align_bam):
        with file_transaction(data, f1_out, f2_out) as (tx_f1_out, tx_f2_out):
            jvm_opts = _get_fgbio_jvm_opts(data, os.path.dirname(tx_f1_out), 2)
            # Improve speeds by avoiding compression read/write bottlenecks
            io_opts = "--async-io=true --compression=0"
            est_options = _estimate_fgbio_defaults(avg_coverage)
            group_opts, cons_opts, filter_opts = _get_fgbio_options(data, est_options, umi_method)
            # Duplex consensus for paired UMIs, molecular consensus otherwise.
            cons_method = "CallDuplexConsensusReads" if umi_method == "paired" else "CallMolecularConsensusReads"
            # NOTE(review): this local name shadows the stdlib ``tempfile``
            # module within this scope; it is only used via cmd.format().
            tempfile = "%s-bamtofastq-tmp" % utils.splitext_plus(f1_out)[0]
            ref_file = dd.get_ref_file(data)
            cmd = ("unset JAVA_HOME && " "fgbio {jvm_opts} {io_opts} GroupReadsByUmi {group_opts} -t {umi_tag} -s {umi_method} " "-i {align_bam} | " "fgbio {jvm_opts} {io_opts} {cons_method} {cons_opts} --sort-order=:none: " "-i /dev/stdin -o /dev/stdout | " "fgbio {jvm_opts} {io_opts} FilterConsensusReads {filter_opts} -r {ref_file} " "-i /dev/stdin -o /dev/stdout | " "bamtofastq collate=1 T={tempfile} F={tx_f1_out} F2={tx_f2_out} tags=cD,cM,cE gz=1")
            do.run(cmd.format(**locals()), "UMI consensus fastq generation")
    return f1_out, f2_out, avg_coverage
|
def rfc2822_format(val):
    """Takes either a date, a datetime, a number, or a string, and returns a
    string that represents the value in RFC 2822 format. If a string is
    passed it is returned unchanged; unsupported types are also returned
    unchanged.
    """
    # Fix: replaced the third-party ``six.string_types`` check with the
    # stdlib ``str`` check — identical behavior on Python 3, one less
    # dependency.
    if isinstance(val, str):
        return val
    elif isinstance(val, (datetime.datetime, datetime.date)):
        # Convert to a POSIX timestamp (interpreted in local time).
        val = time.mktime(val.timetuple())
    if isinstance(val, numbers.Number):
        return email.utils.formatdate(val)
    else:
        # Bail: unsupported type, hand it back unchanged.
        return val
|
def drop_empty(arr):
    """Drop empty list elements from ``arr``.

    :param arr: iterable of items, possibly containing empty lists
    :return: list with every empty-list element removed
    """
    def _keep(item):
        # Non-lists always survive; lists survive only when non-empty.
        return not isinstance(item, list) or len(item) > 0

    return [item for item in arr if _keep(item)]
|
def rpc(self, address, rpc_id):
    """Call an RPC and receive the result as an integer.

    If the RPC does not properly return a 32-bit integer, raise a warning
    unless it cannot be converted into an integer at all, in which case
    a HardwareError is thrown.

    Args:
        address (int): The address of the tile we want to call the RPC on
        rpc_id (int): The id of the RPC that we want to call

    Returns:
        int: The result of the RPC call. If the rpc did not succeed an
        error is thrown instead.
    """
    # Mocked RPCs always take precedence over the default behavior.
    mocks = self.mock_rpcs
    if address in mocks and rpc_id in mocks[address]:
        return mocks[address][rpc_id]
    result = self._call_rpc(address, rpc_id, bytes())
    size = len(result)
    if size != 4:
        self.warn(u"RPC 0x%X on address %d: response had invalid length %d not equal to 4" % (rpc_id, address, size))
    if size < 4:
        raise HardwareError("Response from RPC was not long enough to parse as an integer", rpc_id=rpc_id, address=address, response_length=size)
    # Truncate any over-long response and decode as little-endian uint32.
    res, = struct.unpack("<L", result[:4])
    return res
|
def is_valid_embedding(emb, source, target):
    """A simple (bool) diagnostic for minor embeddings.

    See :func:`diagnose_embedding` for a more detailed diagnostic / more information.

    Args:
        emb (dict): a dictionary mapping source nodes to arrays of target nodes
        source (graph or edgelist): the graph to be embedded
        target (graph or edgelist): the graph being embedded into

    Returns:
        bool: True if `emb` is valid.
    """
    # Valid iff the diagnostic generator yields no problems at all.
    _done = object()
    problems = iter(diagnose_embedding(emb, source, target))
    return next(problems, _done) is _done
|
def GetBalance(self, asset_id, watch_only=0):
    """Get the balance of a specific token by its asset id.

    Args:
        asset_id (NEP5Token|TransactionOutput): an instance of type
            neo.Wallets.NEP5Token or neo.Core.TX.Transaction.TransactionOutput
            to get the balance from.
        watch_only (bool): True, to limit to watch only wallets.

    Returns:
        Fixed8: total balance.
    """
    total = Fixed8(0)
    # NEP5 tokens are handled by the dedicated token-balance path.
    if type(asset_id) is NEP5Token.NEP5Token:
        return self.GetTokenBalance(asset_id, watch_only)
    for coin in self.GetCoins():
        output = coin.Output
        if output.AssetId != asset_id:
            continue
        state = coin.State
        # Count only confirmed, unspent, unlocked, unfrozen coins whose
        # watch-only flag matches the requested mode.
        spendable = (state & CoinState.Confirmed > 0
                     and state & CoinState.Spent == 0
                     and state & CoinState.Locked == 0
                     and state & CoinState.Frozen == 0
                     and state & CoinState.WatchOnly == watch_only)
        if spendable:
            total = total + output.Value
    return total
|
def fit(self, counts_df, val_set=None):
    """Fit Hierarchical Poisson Model to sparse count data.

    Fits a hierarchical Poisson model to count data using mean-field
    approximation with either full-batch coordinate-ascent or mini-batch
    stochastic coordinate-ascent.

    Note
    ----
    DataFrames and arrays passed to '.fit' might be modified inplace - if
    this is a problem you'll need to pass a copy to them, e.g.
    'counts_df=counts_df.copy()'.

    Note
    ----
    Forcibly terminating the procedure should still keep the last
    calculated shape and rate parameter values, but is not recommended.
    If you need to make predictions on a force-terminated object, set the
    attribute 'is_fitted' to 'True'.

    Note
    ----
    Fitting in mini-batches is more prone to numerical instability and
    compared to full-batch variational inference, it is more likely that
    all your parameters will turn to NaNs (which means the optimization
    procedure failed).

    Parameters
    ----------
    counts_df : pandas data frame (nobs, 3) or coo_matrix
        Input data with one row per non-zero observation, consisting of
        triplets ('UserId', 'ItemId', 'Count'). Must contain columns
        'UserId', 'ItemId', and 'Count'. Combinations of users and items
        not present are implicitly assumed to be zero by the model. Can
        also pass a sparse coo_matrix, in which case 'reindex' will be
        forced to 'False'.
    val_set : pandas data frame (nobs, 3)
        Validation set on which to monitor log-likelihood. Same format
        as counts_df.

    Returns
    -------
    self : obj
        Copy of this object
    """
    ## a basic check
    if self.stop_crit == 'val-llk':
        if val_set is None:
            raise ValueError("If 'stop_crit' is set to 'val-llk', must provide a validation set.")
    ## running each sub-process
    if self.verbose:
        self._print_st_msg()
    self._process_data(counts_df)
    if self.verbose:
        self._print_data_info()
    # The validation set is only processed when the stopping criterion
    # actually monitors it; otherwise it is discarded.
    if (val_set is not None) and (self.stop_crit != 'diff-norm') and (self.stop_crit != 'train-llk'):
        self._process_valset(val_set)
    else:
        self.val_set = None
    self._cast_before_fit()
    self._fit()
    ## after terminating optimization
    if self.keep_data:
        if self.users_per_batch == 0:
            self._store_metadata()
        else:
            # Mini-batch mode: drop the trailing sentinel start-index.
            self._st_ix_user = self._st_ix_user[:-1]
    if self.produce_dicts and self.reindex:
        # Build external-id -> internal-index lookup dicts.
        self.user_dict_ = {self.user_mapping_[i]: i for i in range(self.user_mapping_.shape[0])}
        self.item_dict_ = {self.item_mapping_[i]: i for i in range(self.item_mapping_.shape[0])}
    self.is_fitted = True
    # Release temporary references to the input data.
    del self.input_df
    del self.val_set
    return self
|
def merge_sketches(outdir, sketch_paths):
    """Merge new Mash sketches with current Mash sketches.

    Args:
        outdir (str): output directory to write merged Mash sketch file
        sketch_paths (list of str): Mash sketch file paths for input fasta files

    Returns:
        str: output path for Mash sketch file with new and old sketches
    """
    merged_path = os.path.join(outdir, 'sistr.msh')
    # mash paste <out> <new sketches...> <existing sketch file>
    cmd = ['mash', 'paste', merged_path] + list(sketch_paths) + [MASH_SKETCH_FILE]
    logging.info('Running Mash paste with command: %s', ' '.join(cmd))
    proc = Popen(cmd)
    proc.wait()
    assert os.path.exists(merged_path), 'Merged sketch was not created at {}'.format(merged_path)
    return merged_path
|
def warn_for_geometry_collections(self):
    """Checks for GeoJson GeometryCollection features to warn user about incompatibility."""
    # Identify each GeometryCollection feature by its 'properties' value
    # when present, otherwise by its positional index in the feature list.
    geom_collections = [feature.get('properties') if feature.get('properties') is not None else key for key, feature in enumerate(self._parent.data['features']) if feature['geometry']['type'] == 'GeometryCollection']
    # NOTE(review): any() ignores falsy identifiers (index 0, empty
    # properties dict), so a lone GeometryCollection at position 0 may not
    # trigger the warning — confirm whether this is intended.
    if any(geom_collections):
        warnings.warn("GeoJsonTooltip is not configured to render tooltips for GeoJson GeometryCollection geometries. " "Please consider reworking these features: {} to MultiPolygon for full functionality.\n" "https://tools.ietf.org/html/rfc7946#page-9".format(geom_collections), UserWarning)
|
def fix_config(self, options):
    """Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.

    :param options: the options to fix
    :type options: dict
    :return: the (potentially) fixed options
    :rtype: dict
    """
    # Each entry: (option name, default value, help text). A data-driven
    # loop replaces six nearly identical stanzas; insertion order and
    # values are unchanged.
    defaults = (
        ("db_url", "jdbc:mysql://somehost:3306/somedatabase",
         "The JDBC database URL to connect to (str)."),
        ("user", "user",
         "The database user to use for connecting (str)."),
        ("password", "secret",
         "The password for the database user (str)."),
        ("query", "SELECT * FROM table",
         "The SQL query for generating the dataset (str)."),
        ("sparse", False,
         "Whether to return the data in sparse format (bool)."),
        ("custom_props", "",
         "Custom properties filename (str)."),
    )
    for opt, default, help_text in defaults:
        if opt not in options:
            options[opt] = default
        if opt not in self.help:
            self.help[opt] = help_text
    return super(LoadDatabase, self).fix_config(options)
|
def pack_column_flat(self, value, components=None, offset=False):
    """Flatten per-component column data into a single numpy array.

    :param value: dict mapping component name to array, or a list of
        arrays paired with ``components`` (or all components in order).
    :param components: component name or list of names to pack; defaults
        to the keys of ``value`` (or all components when ``value`` is a list).
    :param offset: when True, shift each component's values by the running
        vertex count of the preceding components.
    :return: stacked array — vstack for 2-D component arrays, hstack otherwise.
    :raises TypeError: if ``components`` is neither a string nor a list.
    """
    if components:
        if isinstance(components, str):
            components = [components]
        elif isinstance(components, list):
            components = components
        else:
            raise TypeError("components should be list or string, not {}".format(type(components)))
    elif isinstance(value, dict):
        # BUGFIX: materialize as a list — dict views are not indexable on
        # Python 3 and ``components[0]`` is used below.
        components = list(value.keys())
    elif isinstance(value, list):
        # BUGFIX: same indexability fix for the component-name keys.
        components = list(self._dict.keys())
        value = {c: v for c, v in zip(components, value)}
    if offset:
        values = []
        offsetN = 0
        for c in components:
            values.append(value[c] + offsetN)
            offsetN += len(self[c]['vertices'])
    else:
        values = [value[c] for c in components]
    if len(value[components[0]].shape) > 1:
        return np.vstack(values)
    else:
        return np.hstack(values)
|
def to_column_format(self, high_density_vertical=True):
    """Extract slices of an image as equal-sized blobs of column-format data.

    :param high_density_vertical: Printed line height in dots
    """
    # Rotate so columns become rows, mirrored to preserve orientation.
    rotated = self._im.transpose(Image.ROTATE_270).transpose(Image.FLIP_LEFT_RIGHT)
    line_height = 24 if high_density_vertical else 8
    width_pixels, height_pixels = rotated.size
    left = 0
    while left < width_pixels:
        # Crop one line_height-wide vertical strip and emit its raw bytes.
        box = (left, 0, left + line_height, 0 + height_pixels)
        strip = rotated.transform((line_height, height_pixels), Image.EXTENT, box)
        yield strip.tobytes()
        left += line_height
|
def read_legacy_cfg_files(self, cfg_files, alignak_env_files=None):  # pylint: disable=too-many-nested-blocks, too-many-statements
    # pylint: disable=too-many-branches, too-many-locals
    """Read and parse the Nagios legacy configuration files
    and store their content into a StringIO object which content
    will be returned as the function result.

    Inclusion directives (cfg_file=, resource_file=, cfg_dir=) found in
    the top-level files are followed one level deep and their content is
    appended to the same buffer.

    :param cfg_files: list of file to read
    :type cfg_files: list
    :param alignak_env_files: name of the alignak environment file
    :type alignak_env_files: list
    :return: a buffer containing all files
    :rtype: str
    """
    cfg_buffer = ''
    if not cfg_files:
        return cfg_buffer
    # Update configuration with the first legacy configuration file name and path
    # This will update macro properties
    self.alignak_env = 'n/a'
    if alignak_env_files is not None:
        self.alignak_env = alignak_env_files
        if not isinstance(alignak_env_files, list):
            self.alignak_env = [os.path.abspath(alignak_env_files)]
        else:
            self.alignak_env = [os.path.abspath(f) for f in alignak_env_files]
    self.main_config_file = os.path.abspath(cfg_files[0])
    self.config_base_dir = os.path.dirname(self.main_config_file)
    # Universal newline mode (all new lines are managed internally)
    res = StringIO(u"# Configuration cfg_files buffer", newline=None)
    if not self.read_config_silent and cfg_files:
        logger.info("Reading the configuration cfg_files...")
    # A first pass to get all the configuration cfg_files in a buffer
    for cfg_file in cfg_files:
        # Make sure the configuration cfg_files are not repeated...
        if os.path.abspath(cfg_file) in self.my_cfg_files:
            logger.warning("- ignoring repeated file: %s", os.path.abspath(cfg_file))
            continue
        self.my_cfg_files.append(os.path.abspath(cfg_file))
        # File header
        res.write(u"\n")
        res.write(u"# imported_from=%s" % cfg_file)
        res.write(u"\n")
        if not self.read_config_silent:
            logger.info("- opening '%s' configuration file", cfg_file)
        try:
            # Open in Universal way for Windows, Mac, Linux-based systems
            file_d = open(cfg_file, 'r')
            buf = file_d.readlines()
            file_d.close()
        except IOError as exp:
            self.add_error("cannot open main file '%s' for reading: %s" % (cfg_file, exp))
            continue
        for line in buf:
            try:
                line = line.decode('utf8', 'replace')
            except AttributeError:
                # Python 3 will raise an exception because the line is still unicode
                pass
            line = line.strip()
            res.write(line)
            res.write(u"\n")
            # Follow cfg_file= / resource_file= inclusion directives.
            if (re.search("^cfg_file", line) or re.search("^resource_file", line)) and '=' in line:
                elts = line.split('=', 1)
                if os.path.isabs(elts[1]):
                    cfg_file_name = elts[1]
                else:
                    # Relative paths are resolved against the main config dir.
                    cfg_file_name = os.path.join(self.config_base_dir, elts[1])
                cfg_file_name = cfg_file_name.strip()
                cfg_file_name = os.path.abspath(cfg_file_name)
                # Make sure the configuration cfg_files are not repeated...
                if cfg_file_name in self.my_cfg_files:
                    logger.warning("- ignoring repeated file: %s", cfg_file_name)
                else:
                    self.my_cfg_files.append(cfg_file_name)
                    if not self.read_config_silent:
                        logger.info(" reading: %s", cfg_file_name)
                    try:
                        # Read the file content to the buffer
                        file_d = open(cfg_file_name, 'r')
                        # File header
                        res.write(u"\n")
                        res.write(u"# imported_from=%s" % cfg_file_name)
                        res.write(u"\n")
                        content = file_d.read()
                        try:
                            content = content.decode('utf8', 'replace')
                        except AttributeError:
                            # Python 3 will raise an exception
                            pass
                        res.write(content)
                        res.write(u"\n")
                        file_d.close()
                    except IOError as exp:
                        self.add_error(u"cannot open file '%s' for reading: %s" % (cfg_file_name, exp))
            elif re.search("^cfg_dir", line) and '=' in line:
                # cfg_dir= directive: append every .cfg file found under
                # the directory (followed recursively, including symlinks).
                elts = line.split('=', 1)
                if os.path.isabs(elts[1]):
                    cfg_dir_name = elts[1]
                else:
                    cfg_dir_name = os.path.join(self.config_base_dir, elts[1])
                # Ok, look if it's really a directory
                if not os.path.isdir(cfg_dir_name):
                    self.add_error(u"cannot open directory '%s' for reading" % cfg_dir_name)
                    continue
                # Now walk for it.
                for root, _, walk_files in os.walk(cfg_dir_name, followlinks=True):
                    for found_file in walk_files:
                        if not re.search(r"\.cfg$", found_file):
                            continue
                        cfg_file_name = os.path.join(root, found_file)
                        # Make sure the configuration cfg_files are not repeated...
                        if os.path.abspath(cfg_file_name) in self.my_cfg_files:
                            logger.warning("- ignoring repeated file: %s", cfg_file_name)
                        else:
                            self.my_cfg_files.append(cfg_file_name)
                            if not self.read_config_silent:
                                logger.info(" reading: %s", cfg_file_name)
                            try:
                                # Read the file content to the buffer
                                file_d = open(cfg_file_name, 'r')
                                # File header
                                res.write(u"\n")
                                res.write(u"# imported_from=%s" % cfg_file_name)
                                res.write(u"\n")
                                content = file_d.read()
                                try:
                                    content = content.decode('utf8', 'replace')
                                except AttributeError:
                                    # Python 3 will raise an exception
                                    pass
                                res.write(content)
                                res.write(u"\n")
                                file_d.close()
                            except IOError as exp:
                                self.add_error(u"cannot open file '%s' for reading: %s" % (cfg_file_name, exp))
    cfg_buffer = res.getvalue()
    res.close()
    return cfg_buffer
|
def get_stft_kernels(n_dft):
    """Build real/imaginary DFT kernels for a real-valued input signal.

    An asymmetric hann window (``fftbins=True``) is applied to each kernel.

    Parameters
    ----------
    n_dft : int > 0 and power of 2
        Number of DFT components.

    Returns
    -------
    (real, imag) : pair of np.ndarray, each shaped (nb_filter, 1, 1, n_win)
        where nb_filter = n_dft / 2 + 1 and n_win = n_dft, cast to the
        Keras backend float type.
    """
    assert n_dft > 1 and ( ( n_dft & ( n_dft - 1 ) ) == 0 ) , ( 'n_dft should be > 1 and power of 2, but n_dft == %d' % n_dft )
    nb_filter = int(n_dft // 2 + 1)
    # Outer product of angular frequencies and sample indices gives the
    # phase matrix; cos/-sin of it are the real/imaginary DFT bases.
    samples = np.array(range(n_dft))
    omegas = np.arange(nb_filter) * 2 * np.pi / float(n_dft)
    phase = omegas.reshape(-1, 1) * samples.reshape(1, -1)
    real_kernels = np.cos(phase)
    imag_kernels = -np.sin(phase)
    # Window each kernel row with an asymmetric hann window.
    window = librosa.filters.get_window('hann', n_dft, fftbins=True)
    window = window.astype(K.floatx()).reshape((1, -1))
    real_kernels = np.multiply(real_kernels, window).transpose()
    imag_kernels = np.multiply(imag_kernels, window).transpose()
    # Add singleton dims so the kernels fit a conv layer's weight shape.
    real_kernels = real_kernels[:, np.newaxis, np.newaxis, :]
    imag_kernels = imag_kernels[:, np.newaxis, np.newaxis, :]
    return real_kernels.astype(K.floatx()), imag_kernels.astype(K.floatx())
|
def apply_scopes(self):
    """Return the underlying query builder with all global scopes applied.

    :rtype: Builder
    """
    if not self._scopes:
        return self
    builder = copy.copy(self)
    query = builder.get_query()
    # Record the where-count before any scope runs so that each scope's
    # added constraints can later be isolated in their own nested where
    # group, keeping logical order intact.
    base_count = len(query.wheres)
    counts = [0, base_count]
    for scope in self._scopes.values():
        self._apply_scope(scope, builder)
        # Snapshot the count after each scope to delimit its constraints.
        counts.append(len(query.wheres))
    if self._should_nest_wheres_for_scope(query, base_count):
        self._nest_wheres_for_scope(query, Collection(counts).unique().all())
    return builder
|
def note_revert(self, note_id, version_id):
    """Revert a note to a previous version (Requires login) (UNTESTED).

    Parameters:
        note_id (int): The note id.
        version_id (int): The note version id to revert to.
    """
    url = 'notes/{0}/revert.json'.format(note_id)
    payload = {'version_id': version_id}
    return self._get(url, payload, method='PUT', auth=True)
|
def get_per_channel_mean(self, names=('train', 'test')):
    """Compute the mean of each color channel over the given datasets.

    Args:
        names (tuple[str]): the names ('train' or 'test') of the datasets.

    Returns:
        An array of three values, one mean per channel, over all images
        in the given datasets.
    """
    per_pixel = self.get_per_pixel_mean(names)
    # Collapse both spatial axes, leaving one value per channel.
    return np.mean(per_pixel, axis=(0, 1))
|
def deltafmt(delta, decimals=None):
    """Return a human readable representation of a time delta.

    Format: [[Ih]Jm]K[.L]s, for example ``6h5m23s``.

    :param delta: elapsed seconds; anything convertible to ``float``.
    :param decimals: number of decimal places for the seconds field.
        When ``None``, two places are used below one minute, one place
        below ten minutes, and zero places otherwise.
    :return: the formatted string, or ``(bad delta: ...)`` when *delta*
        cannot be converted to a float.
    """
    try:
        delta = float(delta)
    except (TypeError, ValueError):
        # Narrowed from a bare except: float() raises only these two.
        return '(bad delta: %s)' % (str(delta),)
    if delta < 60:
        if decimals is None:
            decimals = 2
        return ("{0:." + str(decimals) + "f}s").format(delta)
    mins = int(delta / 60)
    secs = delta - mins * 60
    if delta < 600:
        if decimals is None:
            decimals = 1
        return ("{0:d}m{1:." + str(decimals) + "f}s").format(mins, secs)
    if decimals is None:
        decimals = 0
    hours = int(mins / 60)
    mins -= hours * 60
    if delta < 3600:
        # BUG fix: this branch previously hardcoded ".0f" and ignored an
        # explicit `decimals` argument, contradicting the docstring.
        return ("{0:d}m{1:." + str(decimals) + "f}s").format(mins, secs)
    return ("{0:d}h{1:d}m{2:." + str(decimals) + "f}s").format(hours, mins, secs)
|
def _set_ignored_version(version):
    """Persist the most recent API version the user chose to ignore.

    :param version: Most recent ignored API update
    """
    # Overwrite the data file with a single-key JSON document.
    with open(filepath, 'w') as data_file:
        json.dump({'version': version}, data_file)
|
def update(self, truth, guess, features):
    """Update the feature weights (averaged-perceptron step)."""
    def _bump(label, feat, weight, delta):
        # Accumulate the weight over the timesteps it was left unchanged,
        # stamp the current step, then apply the delta.
        key = (feat, label)
        self._totals[key] += (self.i - self._tstamps[key]) * weight
        self._tstamps[key] = self.i
        self.weights[feat][label] = weight + delta

    self.i += 1
    if truth == guess:
        return None
    for feat in features:
        label_weights = self.weights.setdefault(feat, {})
        # Reward the correct label, penalize the predicted one.
        _bump(truth, feat, label_weights.get(truth, 0.0), 1.0)
        _bump(guess, feat, label_weights.get(guess, 0.0), -1.0)
    return None
|
def loadFeatures(self, path_to_fc):
    """Load a feature class's features into this object's value."""
    from ..common.spatial import featureclass_to_json
    # Round-trip through JSON text to obtain plain Python structures.
    self.value = json.loads(featureclass_to_json(path_to_fc))
|
def _init_metadata ( self ) :
    """Initialize metadata for this record.

    Explicitly invokes the metadata initializers of both form records on
    this instance, then continues initialization up the MRO past the
    mixin via ``super``.
    """

    # Call each form record's initializer directly so both sets of
    # metadata get registered on this instance.
    QuestionTextFormRecord . _init_metadata ( self )
    QuestionFilesFormRecord . _init_metadata ( self )
    # Continue up the method-resolution order beyond the mixin.
    super ( QuestionTextAndFilesMixin , self ) . _init_metadata ( )
|
def init_net_params(scale, layer_sizes, rs=npr.RandomState(0)):
    """Build a list of (weights, biases) tuples, one pair per layer.

    Each weight matrix is (m, n) and each bias vector is (n,) for
    consecutive layer sizes m -> n, drawn from N(0, 1) and scaled.
    """
    params = []
    # Pair each layer size with the next one to get layer shapes.
    for n_in, n_out in zip(layer_sizes[:-1], layer_sizes[1:]):
        weights = scale * rs.randn(n_in, n_out)  # weight matrix
        biases = scale * rs.randn(n_out)         # bias vector
        params.append((weights, biases))
    return params
|
def disableIndexing(self):
    '''disableIndexing - Disables indexing. Consider using plain AdvancedHTMLParser class.
        Maybe useful in some scenarios where you want to parse, add a ton of elements, then index
        and do a bunch of searching.
    '''
    # Turn off every index flag, then drop any indexes already built.
    self.indexIDs = False
    self.indexNames = False
    self.indexClassNames = False
    self.indexTagNames = False
    self._resetIndexInternal()
|
def assemble(self):
    """Mangle self into an argument array suitable for exec-style calls."""
    self.canonify()
    # First element mimics argv[0]; fall back to "python" when absent.
    args = [sys.argv and sys.argv[0] or "python"]
    if self.mountpoint:
        args.append(self.mountpoint)
    # Emit the flag for every enabled modifier.
    args.extend(self.fuse_modifiers[mod] for mod, enabled in self.modifiers.items() if enabled)
    # Collect key=value options plus bare options into one "-o" argument.
    options = [key + '=' + val for key, val in self.optdict.items()]
    options.extend(self.optlist)
    if options:
        args.append("-o" + ",".join(options))
    return args
|
def create(self, quality_score, issue=values.unset):
    """Create a new FeedbackInstance.

    :param unicode quality_score: The call quality expressed as an integer from 1 to 5
    :param FeedbackInstance.Issues issue: Issues experienced during the call
    :returns: Newly created FeedbackInstance
    :rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
    """
    # Unset values are stripped by values.of before the request is sent.
    form = values.of({
        'QualityScore': quality_score,
        'Issue': serialize.map(issue, lambda e: e),
    })
    response = self._version.create('POST', self._uri, data=form)
    return FeedbackInstance(
        self._version,
        response,
        account_sid=self._solution['account_sid'],
        call_sid=self._solution['call_sid'],
    )
|
def next ( self , timeout = 0 ) :
    """Return empty unless new data is ready for the client .
    Arguments :
    timeout : Default timeout = 0 range zero to float specifies a time - out as a floating point
    number in seconds . Will sit and wait for timeout seconds . When the timeout argument is omitted
    the function blocks until at least one file descriptor is ready . A time - out value of zero specifies
    a poll and never blocks ."""

    try :
        # Poll the gpsd socket for readability; an empty `waitin` list
        # means nothing arrived within `timeout` seconds.
        waitin , _waitout , _waiterror = select . select ( ( self . streamSock , ) , ( ) , ( ) , timeout )
        if not waitin :
            # No data ready -- implicitly returns None.
            return
        else :
            # Wrap the socket in a file object so one full line (a gpsd
            # JSON report) can be read at a time.
            gpsd_response = self . streamSock . makefile ( )
            # ' . makefile ( buffering = 4096 ) ' In strictly Python3
            self . response = gpsd_response . readline ( )
            return self . response
    except StopIteration as error :
        # NOTE(review): select/readline do not normally raise
        # StopIteration; presumably kept for legacy iterator callers --
        # confirm before removing.
        sys . stderr . write ( 'The readline exception in GPSDSocket.next is--> {}' . format ( error ) )
|
def diff(self, filename, wildcard='*'):
    '''show differences with another parameter file'''
    other = MAVParmDict()
    if not other.load(filename):
        return
    pattern = wildcard.upper()
    # Walk the union of parameter names from both files, in sorted order.
    for key in sorted(set(self.keys()) | set(other.keys())):
        if not fnmatch.fnmatch(str(key).upper(), pattern):
            continue
        if key not in other:
            # Present only in this file.
            print("%-16.16s %12.4f" % (key, self[key]))
        elif key not in self:
            # Present only in the other file.
            print("%-16.16s %12.4f" % (key, other[key]))
        elif abs(self[key] - other[key]) > self.mindelta:
            # Present in both but differing beyond the tolerance.
            print("%-16.16s %12.4f %12.4f" % (key, other[key], self[key]))
|
def created(filename):
    '''Retrieve how long ago a file has been created.

    :param filename: path to the file, or an open file object
    >>> print(created('/'))  # doctest: +SKIP
    8 weeks ago'''
    import io  # local import keeps the fix self-contained
    # BUG fix: the Python 2 builtin `file` no longer exists; accept any
    # io stream object and use its backing filename instead.
    if isinstance(filename, io.IOBase):
        filename = filename.name
    return duration(os.stat(filename)[stat.ST_CTIME])
|
def about_time ( fn = None , it = None ) :
    """Measures the execution time of a block of code , and even counts iterations
    and the throughput of them , always with a beautiful " human " representation .
    There ' s three modes of operation : context manager , callable handler and
    iterator metrics .
    1 . Use it like a context manager :
    > > > with about _ time ( ) as t _ whole :
    . . . . with about _ time ( ) as t _ 1:
    . . . . func _ 1 ( )
    . . . . with about _ time ( ) as t _ 2:
    . . . . func _ 2 ( ' params ' )
    > > > print ( f ' func _ 1 time : { t _ 1 . duration _ human } ' )
    > > > print ( f ' func _ 2 time : { t _ 2 . duration _ human } ' )
    > > > print ( f ' total time : { t _ whole . duration _ human } ' )
    The actual duration in seconds is available in :
    > > > secs = t _ whole . duration
    2 . You can also use it like a callable handler :
    > > > t _ 1 = about _ time ( func _ 1)
    > > > t _ 2 = about _ time ( lambda : func _ 2 ( ' params ' ) )
    Use the field ` result ` to get the outcome of the function .
    Or you mix and match both :
    > > > with about _ time ( ) as t _ whole :
    . . . . t _ 1 = about _ time ( func _ 1)
    . . . . t _ 2 = about _ time ( lambda : func _ 2 ( ' params ' ) )
    3 . And you can count and , since we have duration , also measure the throughput
    of an iterator block , specially useful in generators , which do not have length ,
    but you can use with any iterables :
    > > > def callback ( t _ func ) :
    . . . . logger . info ( ' func : size = % d throughput = % s ' , t _ func . count ,
    . . . . t _ func . throughput _ human )
    > > > items = filter ( . . . )
    > > > for item in about _ time ( callback , items ) :
    . . . . # use item any way you want .
    . . . . pass"""

    # has to be here to be mockable .
    # perf_counter is monotonic and high-resolution; time.time is the
    # pre-3.3 fallback.
    if sys . version_info >= ( 3 , 3 ) :
        timer = time . perf_counter
    else : # pragma : no cover
        timer = time . time
    @ contextmanager
    def context ( ) :
        # `timings` is the same list the Handle objects hold, so writing
        # the start/end stamps here makes the duration visible to them.
        timings [ 0 ] = timer ( )
        yield handle
        timings [ 1 ] = timer ( )
    timings = [ 0.0 , 0.0 ]
    handle = Handle ( timings )
    if it is None : # use as context manager .
        if fn is None :
            return context ( )
        # use as callable handler .
        with context ( ) :
            result = fn ( )
        return HandleResult ( timings , result )
    # use as counter / throughput iterator .
    if fn is None or not callable ( fn ) : # handles inversion of parameters .
        raise UserWarning ( 'use as about_time(callback, iterable) in counter/throughput mode.' )
    def counter ( ) :
        # i starts at -1 so an empty iterable reports a count of 0.
        i = - 1
        with context ( ) :
            for i , elem in enumerate ( it ) :
                yield elem
        # Invoke the callback with count and timing stats once exhausted.
        fn ( HandleStats ( timings , i + 1 ) )
    return counter ( )
|
def set_matrix ( self , matrix ) :
    """Sets the pattern ’ s transformation matrix to : obj : ` matrix ` .
    This matrix is a transformation from user space to pattern space .
    When a pattern is first created
    it always has the identity matrix for its transformation matrix ,
    which means that pattern space is initially identical to user space .
    * * Important : * *
    Please note that the direction of this transformation matrix
    is from user space to pattern space .
    This means that if you imagine the flow
    from a pattern to user space ( and on to device space ) ,
    then coordinates in that flow will be transformed
    by the inverse of the pattern matrix .
    For example , if you want to make a pattern appear twice as large
    as it does by default the correct code to use is : :
    pattern . set _ matrix ( Matrix ( xx = 0.5 , yy = 0.5 ) )
    Meanwhile , using values of 2 rather than 0.5 in the code above
    would cause the pattern to appear at half of its default size .
    Also , please note the discussion of the user - space locking semantics
    of : meth : ` Context . set _ source ` .
    : param matrix : A : class : ` Matrix ` to be copied into the pattern ."""

    # Hand the matrix to cairo, then surface any cairo error status as
    # a Python exception.
    cairo . cairo_pattern_set_matrix ( self . _pointer , matrix . _pointer )
    self . _check_status ( )
|
def gate_angle ( gate0 : Gate , gate1 : Gate ) -> bk . BKTensor :
    """The Fubini - Study angle between gates .

    : param gate0 : first gate
    : param gate1 : second gate
    : return : the Fubini - Study angle between the gates ' vector representations"""

    # Delegate to the vector-level metric using each gate's .vec form.
    return fubini_study_angle ( gate0 . vec , gate1 . vec )
|
def close(self):
    """Close all connection pools; safe to call more than once."""
    if self._closed:
        return
    self._closed = True
    self._stop_multi_pools()
    # Clear and release each pool that is still alive.
    for attr in ('_http_pool', '_tcp_pool'):
        pool = getattr(self, attr)
        if pool is not None:
            pool.clear()
            setattr(self, attr, None)
|
def list_csi_driver(self, **kwargs):
    """list or watch objects of kind CSIDriver

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, pass async_req=True:
    >>> thread = api.list_csi_driver(async_req=True)
    >>> result = thread.get()

    Supported keyword arguments (all optional): async_req (bool),
    pretty (str), _continue (str), field_selector (str),
    label_selector (str), limit (int), resource_version (str),
    timeout_seconds (int), watch (bool).

    :return: V1beta1CSIDriverList, or the request thread when called
        asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async (thread) and sync (data) paths delegate to the same
    # *_with_http_info helper; its return value is passed straight through.
    return self.list_csi_driver_with_http_info(**kwargs)
|
def data(self, column, role):
    """Return the data for the column and role.

    :param column: the data column
    :type column: int
    :param role: the data role
    :type role: QtCore.Qt.ItemDataRole
    :returns: data depending on the role, or None when no data is set
        or the column is out of range
    :rtype:
    :raises: None
    """
    if self._data is None:
        return None
    # BUG fix: the bounds check previously used `or`
    # (column >= 0 or column < count), which is true for every int and
    # let out-of-range columns through. `and` gives the intended range.
    if 0 <= column < self._data.column_count():
        return self._data.data(column, role)
    return None
|
def _account_table ( accounts ) :
"""creates a lookup table ( emailaddress - > account ) for a given list of
accounts
: param accounts : list of accounts
: type accounts : list of ` alot . account . Account `
: returns : hashtable
: rvalue : dict ( str - > ` alot . account . Account ` )"""
|
accountmap = { }
for acc in accounts :
accountmap [ acc . address ] = acc
for alias in acc . aliases :
accountmap [ alias ] = acc
return accountmap
|
def remove_attr(self, attr_name):
    """Remove cookie attribute. The attribute can't be removed if the cookie
    is in read-only mode (RuntimeError exception is raised).

    :param attr_name: name of attribute to remove
    :return: None
    """
    if self.__ro_flag:
        raise RuntimeError('Read-only cookie changing attempt')
    # Normalize the attribute name the same way it was stored.
    name = self.__attr_name(attr_name)
    if name in self.__attrs:
        # BUG fix: previously popped the raw `attr_name` even though
        # membership was checked against the normalized `name`, causing a
        # KeyError (or a silent no-op) whenever normalization changed the key.
        self.__attrs.pop(name)
|
def colorMap(value, name="jet", vmin=None, vmax=None):
    """Map a real value in range [vmin, vmax] to a (r, g, b) color scale.

    :param value: scalar value (or sequence of values) to transform into a color
    :type value: float, list
    :param name: color map name, or a matplotlib colormap instance
    :type name: str, matplotlib.colors.LinearSegmentedColormap
    :return: (r, g, b) color, or a list of (r, g, b) colors.

    :Example:

        .. code-block:: python

            from vtkplotter import colorMap
            import matplotlib.cm as cm
            print(colorMap(0.2, cm.flag, 0, 1))
            (1.0, 0.809016994374948, 0.6173258487801733)
    """
    if not _mapscales:
        print("-------------------------------------------------------------------")
        print("WARNING : cannot import matplotlib.cm (colormaps will show up gray).")
        print("Try e.g.: sudo apt-get install python3-matplotlib")
        print(" or : pip install matplotlib")
        print(" or : build your own map (see example in basic/mesh_custom.py).")
        return (0.5, 0.5, 0.5)
    # Accept either a colormap instance or a registered map name.
    if isinstance(name, matplotlib.colors.LinearSegmentedColormap):
        mp = name
    elif name in _mapscales.keys():
        mp = _mapscales[name]
    else:
        print("Error in colorMap():", name, "\navaliable maps =", sorted(_mapscales.keys()))
        exit(0)
    if _isSequence(value):
        values = np.array(value)
        if vmin is None:
            vmin = np.min(values)
        if vmax is None:
            vmax = np.max(values)
        values = np.clip(values, vmin, vmax)
        # Normalize out-of-place so integer input arrays do not break the
        # previous in-place float division.
        values = (values - vmin) / (vmax - vmin)
        # BUG fix: `mp` was previously re-looked-up here via
        # `_mapscales[name]`, which raised KeyError whenever `name` was a
        # matplotlib colormap instance instead of a map-name string.
        return np.array([mp(v)[0:3] for v in values])
    # Scalar path.
    # NOTE(review): vmin/vmax must be provided for scalar input (a None
    # here raises TypeError) -- confirm callers always pass them.
    value -= vmin
    value /= vmax - vmin
    if value > 0.999:
        value = 0.999
    elif value < 0:
        value = 0
    return mp(value)[0:3]
|
def disassociate_environment_option_pool(self, environment_option_id):
    """Remove a relationship of optionpool with Environment.

    :param environment_option_id: Identifier of the environment/option-pool
        association. Integer value and greater than zero.
    :return: {'id': <environment_option_id>}
    :raise InvalidParameterError: identifier is null and/or invalid.
    :raise optionpoolNotFoundError: Option Pool not registered.
    :raise EnvironmentVipNotFoundError: Environment VIP not registered.
    :raise optionpoolError: Option pool is not associated with the environment pool
    :raise UserNotAuthorizedError: User does not have authorization to make this association.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    # BUG fix: the same value was previously validated twice; the second,
    # copy-pasted check claimed to test an "Environment Pool" id that this
    # method never receives. A single validation suffices.
    if not is_valid_int_param(environment_option_id):
        raise InvalidParameterError(u'The identifier of Option Pool is invalid or was not informed.')
    url = 'api/pools/environment_options/' + str(environment_option_id) + '/'
    return self.delete(url)
|
def arguments ( self ) :
    """Get an iterable object providing each argument in the
    command line for the compiler invocation as a _ CXString .
    Invariant : the first argument is the compiler executable"""

    # Number of arguments libclang recorded for this compile command.
    length = conf . lib . clang_CompileCommand_getNumArgs ( self . cmd )
    # NOTE(review): `xrange` implies Python 2 (or a compatibility shim
    # defined elsewhere in this module) -- confirm before porting.
    for i in xrange ( length ) :
        # Fetch each argument lazily from libclang and stringify it.
        yield str ( conf . lib . clang_CompileCommand_getArg ( self . cmd , i ) )
|
def get_hosts_from_csv ( filename , default_protocol = 'telnet' , default_domain = '' , encoding = 'utf-8' ) :
    """Reads a list of hostnames and variables from the tab - separated . csv file
    with the given name . The first line of the file must contain the column
    names , e . g . : :
    addresstestvar1testvar2
    10.0.0.1value1othervalue
    10.0.0.1value2othervalue2
    10.0.0.2foobar
    For the above example , the function returns * two * host objects , where
    the ' testvar1 ' variable of the first host holds a list containing two
    entries ( ' value1 ' and ' value2 ' ) , and the ' testvar1 ' variable of the
    second host contains a list with a single entry ( ' foo ' ) .
    Both , the address and the hostname of each host are set to the address
    given in the first column . If you want the hostname set to another value ,
    you may add a second column containing the hostname : :
    addresshostnametestvar
    10.0.0.1myhostvalue
    10.0.0.2otherhostothervalue
    : type filename : string
    : param filename : A full filename .
    : type default _ protocol : str
    : param default _ protocol : Passed to the Host constructor .
    : type default _ domain : str
    : param default _ domain : Appended to each hostname that has no domain .
    : type encoding : str
    : param encoding : The encoding of the file .
    : rtype : list [ Host ]
    : return : The newly created host instances ."""

    # Open the file .
    if not os . path . exists ( filename ) :
        raise IOError ( 'No such file: %s' % filename )
    with codecs . open ( filename , 'r' , encoding ) as file_handle : # Read and check the header .
        header = file_handle . readline ( ) . rstrip ( )
        # Header must begin with "hostname" or "address"...
        if re . search ( r'^(?:hostname|address)\b' , header ) is None :
            msg = 'Syntax error in CSV file header:'
            msg += ' File does not start with "hostname" or "address".'
            raise Exception ( msg )
        # ...and all subsequent columns must be tab-separated.
        if re . search ( r'^(?:hostname|address)(?:\t[^\t]+)*$' , header ) is None :
            msg = 'Syntax error in CSV file header:'
            msg += ' Make sure to separate columns by tabs.'
            raise Exception ( msg )
        # Drop the first column name: it is the address, not a variable.
        varnames = [ str ( v ) for v in header . split ( '\t' ) ]
        varnames . pop ( 0 )
        # Walk through all lines and create a map that maps hostname to
        # definitions .
        last_uri = ''
        # Strips trailing CR/LF without touching embedded tabs.
        line_re = re . compile ( r'[\r\n]*$' )
        hosts = [ ]
        for line in file_handle :
            if line . strip ( ) == '' :
                continue
            line = line_re . sub ( '' , line )
            values = line . split ( '\t' )
            uri = values . pop ( 0 ) . strip ( )
            # Add the hostname to our list .
            # Consecutive rows with the same address accumulate variables
            # on the same host object; a new address starts a new host.
            if uri != last_uri : # print " Reading hostname " , hostname _ url , " from csv . "
                host = to_host ( uri , default_protocol , default_domain )
                last_uri = uri
                hosts . append ( host )
            # Define variables according to the definition .
            for i , varname in enumerate ( varnames ) :
                try :
                    # Missing trailing columns default to an empty string.
                    value = values [ i ]
                except IndexError :
                    value = ''
                if varname == 'hostname' :
                    # The special "hostname" column renames the host
                    # instead of being stored as a variable.
                    host . set_name ( value )
                else :
                    host . append ( varname , value )
    return hosts
|
def circular_hough(img, radius, nangles=None, mask=None):
    '''Circular Hough transform of an image.

    img     - image to be transformed.
    radius  - radius of circle.
    nangles - number of angles to measure, e.g. nangles=4 accumulates at
              0, 90, 180 and 270 degrees. Defaults to roughly the circle
              circumference, rounded down to a multiple of 4 so the
              sampling is bilaterally stable.
    mask    - optional boolean mask restricting which source pixels count.

    Returns the accumulator for the transform x + r cos t, y + r sin t,
    normalized by the number of contributing angles per pixel.
    '''
    acc = np.zeros(img.shape)
    counts = np.zeros(img.shape)
    if nangles is None:
        # Circumference-based count, rounded down to a multiple of 4.
        nangles = int(np.pi * radius + 3.5) & (~3)
    for k in range(nangles):
        angle = 2 * np.pi * float(k) / float(nangles)
        dx = int(np.round(radius * np.cos(angle)))
        dy = int(np.round(radius * np.sin(angle)))
        # Clip the shifted window so both source and destination slices
        # stay inside the image.
        x_lo, x_hi = max(0, -dx), min(img.shape[1] - dx, img.shape[1])
        y_lo, y_hi = max(0, -dy), min(img.shape[0] - dy, img.shape[0])
        dst = (slice(y_lo, y_hi), slice(x_lo, x_hi))
        src = (slice(y_lo + dy, y_hi + dy), slice(x_lo + dx, x_hi + dx))
        if mask is None:
            acc[dst] += img[src]
            counts[dst] += 1
        else:
            sel = mask[src]
            acc[dst][sel] += img[src][sel]
            counts[dst][sel] += 1
    # Average over however many angles actually contributed to each pixel.
    contributed = counts > 0
    acc[contributed] /= counts[contributed]
    return acc
|
def cyl_to_rect(R, phi, Z):
    """NAME:
        cyl_to_rect
    PURPOSE:
        convert from cylindrical to rectangular coordinates
    INPUT:
        R, phi, Z - cylindrical coordinates
    OUTPUT:
        X, Y, Z
    HISTORY:
        2011-02-23 - Written - Bovy (NYU)
    """
    # Project the radius onto the x/y axes; Z passes through unchanged.
    x = R * sc.cos(phi)
    y = R * sc.sin(phi)
    return (x, y, Z)
|
def _compile_references(self, url, tree):
    '''Return the list of catalog reference URLs found in the catalog.

    :param str url: URL of the current catalog
    :param lxml.etree.Element tree: current XML tree
    '''
    references = []
    for node in tree.findall('.//{%s}catalogRef' % INV_NS):
        # Honor user-configured skip patterns against the link title.
        title = node.get("{%s}title" % XLINK_NS)
        if any(pattern.match(title) for pattern in self.skip):
            logger.info("Skipping catalogRef based on 'skips'. Title: %s" % title)
            continue
        href = node.get("{%s}href" % XLINK_NS)
        references.append(construct_url(url, href))
    return references
|
def apply(filter):
    """Manufacture a decorator that filters return values with ``filter``.

    ``filter``:
        Callable that takes a single parameter.
    """
    def decorator(callable):
        def wrapper(*args, **kwargs):
            # Run the wrapped callable, then pass its result through filter.
            return filter(callable(*args, **kwargs))
        return wrapper
    return decorator
|
def _process_mrk_marker_view ( self , limit ) :
    """This is the definition of markers
    ( as in genes , but other genomic loci types as well ) .
    It looks up the identifiers in the hashmap
    This includes their labels , specific class , and identifiers
    TODO should we use the mrk _ mouse _ view instead ?
    Triples :
    < marker _ id > a owl : Class OR owl : NamedIndividual
    GENO : marker _ type
    rdf : label < symbol >
    RO : in _ taxon < NCBITaxon _ id >
    : param limit : maximum number of rows to process ( None for all )
    : return : None"""

    # Route triples to the test graph when running in test mode.
    if self . test_mode :
        graph = self . testgraph
    else :
        graph = self . graph
    model = Model ( graph )
    geno = Genotype ( graph )
    line_counter = 0
    raw = '/' . join ( ( self . rawdir , 'mrk_marker_view' ) )
    LOG . info ( "getting markers and assigning types" )
    with open ( raw , 'r' ) as f :
        f . readline ( )
        # read the header row ; skip
        for line in f :
            line = line . rstrip ( "\n" )
            line_counter += 1
            ( marker_key , organism_key , marker_status_key , symbol , name , latin_name , marker_type ) = line . split ( '\t' )
            # In test mode only process the whitelisted marker keys.
            if self . test_mode is True :
                if int ( marker_key ) not in self . test_keys . get ( 'marker' ) :
                    continue
            # use only non - withdrawn markers
            if marker_status_key != '2' :
                marker_id = self . idhash [ 'marker' ] . get ( marker_key )
                # only pull info for mouse genes for now
                # other species should come from other dbs
                if organism_key != '1' :
                    continue
                if marker_id is None :
                    # NOTE(review): after logging, processing continues
                    # with marker_id == None -- confirm this is intended.
                    LOG . error ( "can't find %s %s in the id hash" , marker_key , symbol )
                # Map the raw marker type label to a global term id.
                mapped_marker_type = self . resolve ( marker_type . strip ( ) )
                # if it ' s unlocated , or is not a gene ,
                # then don ' t add it as a class because
                # it ' s not added as a gene .
                # everything except for genes are modeled as individuals
                if mapped_marker_type in [ self . globaltt [ 'gene' ] , self . globaltt [ 'pseudogene' ] ] :
                    model . addClassToGraph ( marker_id , symbol , mapped_marker_type , name )
                    model . addSynonym ( marker_id , name , self . globaltt [ 'has_exact_synonym' ] )
                    self . markers [ 'classes' ] . append ( marker_id )
                else :
                    model . addIndividualToGraph ( marker_id , symbol , mapped_marker_type , name )
                    model . addSynonym ( marker_id , name , self . globaltt [ 'has_exact_synonym' ] )
                    self . markers [ 'indiv' ] . append ( marker_id )
                # Remember the symbol for later label lookups.
                self . label_hash [ marker_id ] = symbol
                # add the taxon
                taxon_id = self . resolve ( latin_name )
                # not always proper binomial
                geno . addTaxon ( taxon_id , marker_id )
                # make MGI the leader for mouse genes .
                if taxon_id == self . globaltt [ 'Mus musculus' ] :
                    model . makeLeader ( marker_id )
            # Stop early once the optional row limit is reached.
            if not self . test_mode and limit is not None and line_counter > limit :
                break
    return
|
def demote(self, mode):
    """Demote PostgreSQL running as master.

    :param mode: One of offline, graceful, immediate or immediate-nolock.
        offline is used when connection to DCS is not available.
        graceful is used when failing over to another node due to user request. May only be called running async.
        immediate is used when we determine that we are not suitable for master and want to failover quickly
        without regard for data durability. May only be called synchronously.
        immediate-nolock is used when find out that we have lost the lock to be master. Need to bring down
        PostgreSQL as quickly as possible without regard for data durability. May only be called synchronously.
    """
    # Per-mode behavior knobs: how to stop postgres, whether to checkpoint
    # before stopping, whether to release the leader key, whether the DCS is
    # unreachable, and whether the follow-up "follow" runs asynchronously.
    mode_control = {'offline': dict(stop='fast', checkpoint=False, release=False, offline=True, async_req=False), 'graceful': dict(stop='fast', checkpoint=True, release=True, offline=False, async_req=False), 'immediate': dict(stop='immediate', checkpoint=False, release=True, offline=False, async_req=True), 'immediate-nolock': dict(stop='immediate', checkpoint=False, release=False, offline=False, async_req=True), }[mode]
    self.state_handler.trigger_check_diverged_lsn()
    # Stop postgres; disabling the watchdog at the safepoint prevents a
    # self-fence once we are no longer master.
    self.state_handler.stop(mode_control['stop'], checkpoint=mode_control['checkpoint'], on_safepoint=self.watchdog.disable if self.watchdog.is_running else None)
    self.state_handler.set_role('demoted')
    self.set_is_leader(False)
    if mode_control['release']:
        with self._async_executor:
            self.release_leader_key_voluntarily()
        time.sleep(2)
        # Give a time to somebody to take the leader lock
    if mode_control['offline']:
        # DCS unreachable: we cannot know whom to follow.
        node_to_follow, leader = None, None
    else:
        cluster = self.dcs.get_cluster()
        node_to_follow, leader = self._get_node_to_follow(cluster), cluster.leader
    # FIXME: with mode offline called from DCS exception handler and handle_long_action_in_progress
    # there could be an async action already running, calling follow from here will lead
    # to racy state handler state updates.
    if mode_control['async_req']:
        self._async_executor.schedule('starting after demotion')
        self._async_executor.run_async(self.state_handler.follow, (node_to_follow,))
    else:
        if self.is_synchronous_mode():
            self.state_handler.set_synchronous_standby(None)
        if self.state_handler.rewind_or_reinitialize_needed_and_possible(leader):
            # do not start postgres, but run pg_rewind on the next iteration
            return False
        self.state_handler.follow(node_to_follow)
|
def thin(image, mask=None, iterations=1):
    '''Thin an image to lines, preserving Euler number

    Implements thinning as described in algorithm # 1 from
    Guo, "Parallel Thinning with Two Subiteration Algorithms",
    Communications of the ACM, Vol 32 # 3 page 359.

    :param image: binary image to thin
    :param mask: optional boolean mask; pixels outside it are not thinned
        and are restored from the input image in the result
    :param iterations: number of thinning passes; ``None`` means iterate
        until the image no longer changes
    :returns: the thinned binary image
    '''
    global thin_table, eight_connect
    if thin_table is None:
        # Lazily build the 2 x 512 lookup table of "remove this pixel"
        # decisions, one row per subiteration, indexed by the 3x3
        # neighborhood bit pattern (bit 16 is the center pixel).
        thin_table = np.zeros((2, 512), bool)
        for i in range(512):
            if (i & 16) == 0:
                # Center pixel is already background -> nothing to do.
                continue
            pat = pattern_of(i & ~16)
            ipat = pat.astype(int)
            if scind.label(pat, eight_connect)[1] != 1:
                # Removing the center would split the neighborhood into
                # multiple components; keep the pixel in both subiterations.
                thin_table[:, i] = True
                continue
            # n1/n2 count "edge pairs" per Guo's C(p) connectivity test.
            n1 = ((ipat[0, 0] or ipat[0, 1]) + (ipat[0, 2] or ipat[1, 2]) +
                  (ipat[2, 2] or ipat[2, 1]) + (ipat[2, 0] or ipat[1, 0]))
            n2 = ((ipat[0, 1] or ipat[0, 2]) + (ipat[1, 2] or ipat[2, 2]) +
                  (ipat[2, 1] or ipat[2, 0]) + (ipat[1, 0] or ipat[0, 0]))
            if min(n1, n2) not in (2, 3):
                thin_table[:, i] = True
                continue
            thin_table[0, i] = ((pat[0, 1] or pat[0, 2] or not pat[2, 2]) and pat[1, 2])
            thin_table[1, i] = ((pat[2, 1] or pat[2, 0] or not pat[0, 0]) and pat[1, 0])
    # Work on a copy so the caller's image is untouched; masked-out pixels
    # are cleared so they never participate in thinning.
    masked_image = image.copy()
    if mask is not None:
        masked_image[~mask] = False
    index_i, index_j, masked_image = prepare_for_index_lookup(masked_image, False)
    if iterations is None:
        # Upper bound: cannot take more passes than there are foreground pixels.
        iterations = len(index_i)
    for _ in range(iterations):
        hit_count = len(index_i)
        for j in range(2):
            index_i, index_j = index_lookup(index_i, index_j, masked_image, thin_table[j], 1)
        if hit_count == len(index_i):
            # Stable: no pixel was removed in a full two-subiteration pass.
            break
    masked_image = extract_from_image_lookup(image, index_i, index_j)
    if mask is not None:
        # BUG FIX: restore original pixel values outside the mask. The
        # previous code assigned masked_image[~mask] to itself (a no-op),
        # leaving masked-out pixels cleared instead of preserved.
        masked_image[~mask] = image[~mask]
    return masked_image
|
async def load_kube_config(config_file=None, context=None, client_configuration=None, persist_config=True):
    """Loads authentication and cluster information from kube-config file
    and stores them in kubernetes.client.configuration.

    :param config_file: Name of the kube-config file.
    :param context: set the active context. If is set to None, current_context
        from config file will be used.
    :param client_configuration: The kubernetes.client.Configuration to
        set configs to.
    :param persist_config: If True, config file will be updated when changed
        (e.g GCP token refresh).
    """
    path = KUBE_CONFIG_DEFAULT_LOCATION if config_file is None else config_file
    loader = _get_kube_config_loader_for_yaml_file(
        path, active_context=context, persist_config=persist_config)
    if client_configuration is not None:
        await loader.load_and_set(client_configuration)
    else:
        # Bypass any metaclass __call__ override when building the default
        # Configuration instance, then install it as the global default.
        fresh_config = type.__call__(Configuration)
        await loader.load_and_set(fresh_config)
        Configuration.set_default(fresh_config)
    return loader
|
def addRnaQuantMetadata(self, fields):
    """Populate RNA quantification metadata from a fields mapping.

    Expected keys: feature_set_ids, description, name, read_group_ids,
    programs (comma-separated id lists) and, optionally, biosample_id.
    """
    self._featureSetIds = fields["feature_set_ids"].split(',')
    self._description = fields["description"]
    self._name = fields["name"]
    self._biosampleId = fields.get("biosample_id", "")
    read_group_field = fields["read_group_ids"]
    self._readGroupIds = read_group_field.split(',') if read_group_field != "" else []
    if fields["programs"] == "":
        self._programs = []
    else:
        # Program ids would be resolved into Program objects here;
        # for now the list stays empty either way.
        self._programs = []
|
def update_config(self, config, timeout=-1):
    """Updates the remote server configuration and the automatic backup schedule for backup.

    Args:
        config (dict): Object to update.
        timeout: Timeout in seconds. Wait for task completion by default.
            The timeout does not abort the operation in OneView, just stops
            waiting for its completion.

    Returns:
        dict: Backup details.
    """
    config_uri = self.URI + "/config"
    return self._client.update(config, uri=config_uri, timeout=timeout)
|
def __upload_chunk(self, resource, chunk_size, bytes, bytes_start, bytes_read):
    """Uploads a single chunk of a multi-chunk upload."""
    remaining = self._file_size - bytes_read
    # note: string conversion required here due to open encoding bug in requests-oauthlib.
    headers = {
        'content-type': self.content_type,
        'content-length': str(min(chunk_size, remaining)),
        'content-range': "bytes {0}-{1}/{2}".format(bytes_start, bytes_read - 1, self._file_size),
    }
    request = Request(self._client, 'put', resource, domain=self._DEFAULT_DOMAIN,
                      headers=headers, body=bytes)
    return request.perform()
|
def _connect(self):
    """Establish connection to MySQL Database.

    Uses the stored connection parameters when available, otherwise
    connects with library defaults.
    """
    if self._connParams:
        self._conn = MySQLdb.connect(**self._connParams)
        return
    self._conn = MySQLdb.connect('')
|
def _call(self, x, out=None):
    """Implement ``self(x[, out])``."""
    if out is None:
        # Out-of-place: scale by the stored vector, then apply the operator.
        return self.operator(x * self.vector)
    # In-place: multiply into a scratch element, then apply into ``out``.
    scratch = self.domain.element()
    x.multiply(self.vector, out=scratch)
    self.operator(scratch, out=out)
|
def render_alert(content, alert_type=None, dismissable=True):
    """Render a Bootstrap alert ``div`` for *content*.

    ``alert_type`` defaults to "info"; a dismissable alert gets a close
    button and the "alert-dismissable" class.
    """
    classes = ["alert", "alert-" + text_value(alert_type or "info")]
    close_button = ""
    if dismissable:
        classes.append("alert-dismissable")
        close_button = ('<button type="button" class="close" '
                        'data-dismiss="alert" aria-hidden="true">×</button>')
    # Render with a placeholder so the raw button markup is not escaped by
    # render_tag, then splice the button in afterwards.
    placeholder = "__BUTTON__"
    rendered = render_tag("div", attrs={"class": " ".join(classes)},
                          content=placeholder + text_value(content))
    return mark_safe(rendered.replace(placeholder, close_button))
|
def key(string):
    """Return a Czech sort key for the given string

    :param string: string (unicode in Python 2)

    Comparing the sort keys of two strings will give the result according
    to how the strings would compare in Czech collation order, i.e.
    ``key(s1) < key(s2)``  <=>  ``s1`` comes before ``s2``

    The structure of the sort key may change in the future.
    The only operations guaranteed to work on it are comparisons and equality
    checks (<, ==, etc.) against other keys.
    """
    # The multi-level key is a nested tuple containing strings and ints,
    # with sub-keys that roughly correspond to levels in UTS #10
    # (http://unicode.org/reports/tr10/):
    #   - Alphabet: separators (0, p, l, w) / letters (1, base letter,
    #     lowercased; 'č' becomes 'cx', the 'ch' digraph 'hx', etc.) /
    #     numbers (2, numeric value)
    #   - Diacritics: (position indices, shape indices); empty when the
    #     diacritic is folded into the base letter (e.g. ř)
    #   - Case: True for upper/title-cased letters
    #   - Punctuation weight (see PUNCTUATION_MAP)
    # plus two fallback strings: the NFKD form and the original string.
    subkeys = [], [], [], []
    add_alphabet = subkeys[0].append
    add_diacritic = subkeys[1].append
    add_case = subkeys[2].append
    add_punctuation = subkeys[3].append
    skip = 0
    normal = nfkd(string).rstrip()
    diacritics = []
    for i, char in enumerate(normal):
        if skip > 0:
            # Character already consumed by lookahead (digraph/combining mark).
            skip -= 1
            continue
        category = get_category(char)
        cat0, cat1 = category
        if cat0 == 'L':
            # Letter (Lowercase, Modifier, Other, Titlecase, Uppercase)
            char_lower = char.lower()
            if char_lower in DECOMPOSING_EXTRAS:
                # Stuff like Ł doesn't decompose in Unicode; do it manually.
                # BUG FIX: the mapping was previously indexed with the
                # original-case `char` while membership was tested with
                # `char_lower`, so uppercase input could raise KeyError.
                char_lower, _extra_diacritics = DECOMPOSING_EXTRAS[char_lower]
                diacritics.extend(_extra_diacritics)
            for next in normal[i + 1:]:
                if next == HACEK and char_lower in ('c', 'r', 's', 'z'):
                    # č/ř/š/ž sort as distinct letters: encode as 'cx' etc.
                    skip += 1
                    char_lower = char_lower + 'x'
                elif char_lower == 'c' and next.lower() == 'h':
                    # The 'ch' digraph is a single letter in Czech ('hx').
                    skip += 1
                    char_lower = 'hx'
                    break
                elif next in DIACRITICS_MAP:
                    skip += 1
                    diacritics.extend(DIACRITICS_MAP[next])
                elif unicodedata.category(char)[0] == 'M':
                    # NOTE(review): this tests `char`, not `next` — looks
                    # like it was meant to consume a combining mark that
                    # follows; confirm against upstream before changing.
                    skip += 1
                    diacritics.append((POS_UNKNOWN, SH_UNKNOWN))
                else:
                    break
            add_alphabet((1, char_lower))
            if diacritics:
                add_diacritic(make_diacritics_key(diacritics))
            else:
                add_diacritic(())
            add_case(cat1 in ('u', 't'))
            # upper & title case
            add_punctuation((0,))
            diacritics = []
        elif cat0 == 'Z':
            # Separator (Line, Paragraph, Space): fold a run of separators
            # into one key; counts are negated so more separators sort first.
            counts = {'Zp': 0, 'Zl': 0, 'Zs': 0}
            counts[category] = 1
            for next in normal[i + 1:]:
                next_cat = get_category(next)
                if next_cat[0] == 'Z':
                    counts[next_cat] += 1
                    skip += 1
                else:
                    break
            add_alphabet((0, -counts['Zp'], -counts['Zl'], -counts['Zs']))
            add_diacritic(())
            add_case(False)
            add_punctuation((0,))
        elif char in DIACRITICS_BEFORE_MAP:
            # Diacritic written before its base letter; attach to the next one.
            diacritics.extend(DIACRITICS_BEFORE_MAP[char])
        elif char in DIACRITICS_MAP:
            diacritics.extend(DIACRITICS_MAP[char])
        elif char in PUNCTUATION_MAP:
            add_punctuation(PUNCTUATION_MAP[char])
        elif cat0 == 'P':
            # Punctuation not in the map (Connector, Dash, Open/Close,
            # Final/Initial Quote, Other)
            add_punctuation((3,))
        elif cat0 == 'N':
            # Number (Decimal digit, Letter, Other)
            # NOTE(review): int() truncates before the *100 scaling, so
            # fractional numerics like ½ collapse to 0; the original notes
            # suggest int(numeric * 100) was intended — confirm upstream.
            add_alphabet((2, int(unicodedata.numeric(char, 0)) * 100))
            add_diacritic(())
            add_case(False)
            add_punctuation((0,))
        elif cat0 == 'S':
            # Symbol (Currency, Modifier, Math)
            add_punctuation((3,))
        elif cat0 == 'C':
            # Other (Control, Format, Not Assigned, Private Use, Surrogate)
            pass
        elif cat0 == 'M':
            # Mark (Spacing Combining, Enclosing, Nonspacing)
            # TODO
            diacritics.append((POS_FRONT, SH_UNKNOWN))
        else:
            raise ValueError('Unknown Unicode category')
    if diacritics:
        # Flush trailing diacritics that never attached to a letter.
        add_diacritic(make_diacritics_key(diacritics))
        diacritics = []
    return tuple(tuple(k) for k in subkeys) + (normal, string)
|
def get_link(self, task_id):
    """Get a ``LinkOfTrust`` by task id.

    Args:
        task_id (str): the task id to find.

    Returns:
        LinkOfTrust: the link matching the task id.

    Raises:
        CoTError: if no single ``LinkOfTrust`` matches.
    """
    matching = [link for link in self.links if link.task_id == task_id]
    if len(matching) != 1:
        raise CoTError("No single Link matches task_id {}!\n{}".format(task_id, self.dependent_task_ids()))
    return matching[0]
|
def getGUA(self, filterByPrefix=None):
    """get expected global unicast IPv6 address of Thread device

    Args:
        filterByPrefix: a given expected global IPv6 prefix to be matched

    Returns:
        a global IPv6 address
    """
    # NOTE: Python 2 source (print statements, `except Exception, e`).
    print '%s call getGUA' % self.port
    print filterByPrefix
    globalAddrs = []
    try:
        # get global addrs set if multiple
        globalAddrs = self.getGlobal()
        if filterByPrefix is None:
            # No filter requested: return the first global address as-is.
            return globalAddrs[0]
        else:
            for line in globalAddrs:
                fullIp = ModuleHelper.GetFullIpv6Address(line)
                if fullIp.startswith(filterByPrefix):
                    return fullIp
            # NOTE(review): when nothing matches, this falls back to the
            # first address instead of failing — confirm that is intended.
            print 'no global address matched'
            return str(globalAddrs[0])
    except Exception, e:
        # Best-effort logging; the method then implicitly returns None.
        ModuleHelper.WriteIntoDebugLogger("getGUA() Error: " + str(e))
|
def _cli(cls, opts):
    """Setup logging via CLI options

    If `--background` -- set INFO level for root logger.
    If `--logdir` -- set logging with next params:
        default Luigi's formatter,
        INFO level,
        output in logdir in `luigi-server.log` file

    Returns True when either option configured logging, False otherwise.
    """
    if opts.background:
        logging.getLogger().setLevel(logging.INFO)
        return True
    if opts.logdir:
        log_path = os.path.join(opts.logdir, "luigi-server.log")
        logging.basicConfig(level=logging.INFO, format=cls._log_format, filename=log_path)
        return True
    return False
|
def _MapLegacyArgs(nt, message, ref):
    """Maps UserNotification object to legacy GRRUser.Notify arguments.

    Args:
        nt: an rdf_objects.UserNotification.Type value.
        message: the notification message text.
        ref: reference object carrying type-specific ids (client, hunt,
            cron_job, flow, vfs_file or approval_request).

    Returns:
        A 4-item list: [legacy notification type name, subject URN/id,
        message text, extra URN or ""].

    Raises:
        NotImplementedError: for notification types without a legacy mapping.
    """
    unt = rdf_objects.UserNotification.Type
    if nt == unt.TYPE_CLIENT_INTERROGATED:
        return ["Discovery", aff4.ROOT_URN.Add(ref.client.client_id), _HostPrefix(ref.client.client_id) + message, "", ]
    # Approval requests point the user at the ACL object to be granted.
    elif nt == unt.TYPE_CLIENT_APPROVAL_REQUESTED:
        return ["GrantAccess", aff4.ROOT_URN.Add("ACL").Add(ref.approval_request.subject_id).Add(ref.approval_request.requestor_username).Add(ref.approval_request.approval_id), message, "", ]
    elif nt == unt.TYPE_HUNT_APPROVAL_REQUESTED:
        return ["GrantAccess", aff4.ROOT_URN.Add("ACL").Add("hunts").Add(ref.approval_request.subject_id).Add(ref.approval_request.requestor_username).Add(ref.approval_request.approval_id), message, "", ]
    elif nt == unt.TYPE_CRON_JOB_APPROVAL_REQUESTED:
        return ["GrantAccess", aff4.ROOT_URN.Add("ACL").Add("cron").Add(ref.approval_request.subject_id).Add(ref.approval_request.requestor_username).Add(ref.approval_request.approval_id), message, "", ]
    # Granted approvals link straight to the now-accessible object.
    elif nt == unt.TYPE_CLIENT_APPROVAL_GRANTED:
        return ["ViewObject", aff4.ROOT_URN.Add(ref.client.client_id), message, "", ]
    elif nt == unt.TYPE_HUNT_APPROVAL_GRANTED:
        return ["ViewObject", aff4.ROOT_URN.Add("hunts").Add(ref.hunt.hunt_id), message, "", ]
    elif nt == unt.TYPE_CRON_JOB_APPROVAL_GRANTED:
        return ["ViewObject", aff4.ROOT_URN.Add("cron").Add(ref.cron_job.cron_job_id), message, "", ]
    elif nt == unt.TYPE_VFS_FILE_COLLECTED:
        return ["ViewObject", ref.vfs_file.ToURN(), _HostPrefix(ref.vfs_file.client_id) + message, "", ]
    # Same shape as the success case; only the message text differs.
    elif nt == unt.TYPE_VFS_FILE_COLLECTION_FAILED:
        return ["ViewObject", ref.vfs_file.ToURN(), _HostPrefix(ref.vfs_file.client_id) + message, "", ]
    elif nt == unt.TYPE_HUNT_STOPPED:
        urn = aff4.ROOT_URN.Add("hunts").Add(ref.hunt.hunt_id)
        return ["ViewObject", urn, message, urn, ]
    elif nt == unt.TYPE_FILE_ARCHIVE_GENERATED:
        return ["ArchiveGenerationFinished", None, message, "", ]
    elif nt == unt.TYPE_FILE_ARCHIVE_GENERATION_FAILED:
        return ["Error", None, message, "", ]
    elif nt == unt.TYPE_FLOW_RUN_COMPLETED:
        urn = None
        if ref.flow and ref.flow.client_id and ref.flow.flow_id:
            urn = aff4.ROOT_URN.Add(ref.flow.client_id).Add("flows").Add(ref.flow.flow_id)
        return ["ViewObject", urn, _HostPrefix(ref.flow.client_id) + message, "", ]
    elif nt == unt.TYPE_FLOW_RUN_FAILED:
        # Failed flows may lack a client id or flow id; degrade gracefully.
        client_id = None
        urn = None
        prefix = ""
        if ref.flow is not None:
            client_id = ref.flow.client_id
            if client_id:
                prefix = _HostPrefix(client_id)
            if ref.flow.flow_id:
                urn = aff4.ROOT_URN.Add(ref.flow.client_id).Add("flows").Add(ref.flow.flow_id)
        return ["FlowStatus", client_id, prefix + message, urn, ]
    elif nt == unt.TYPE_VFS_LIST_DIRECTORY_COMPLETED:
        return ["ViewObject", ref.vfs_file.ToURN(), message, "", ]
    elif nt == unt.TYPE_VFS_RECURSIVE_LIST_DIRECTORY_COMPLETED:
        return ["ViewObject", ref.vfs_file.ToURN(), message, "", ]
    else:
        raise NotImplementedError()
|
def memsize(self):
    """Total array cell + indexes size"""
    bounds_size = TYPE.size(gl.BOUND_TYPE) * len(self.bounds)
    return self.size + 1 + bounds_size
|
def get_search_fields(cls):
    """Returns search fields in sfdict

    Merges ``search_fields`` dicts from each direct base class and from the
    class itself (later classes win on duplicate keys).
    """
    merged = {}
    for klass in tuple(cls.__bases__) + (cls,):
        merged.update(getattr(klass, 'search_fields', {}))
    return merged
|
def handle(self, *args, **options):
    """Create Customer objects for Subscribers without Customer objects associated."""
    subscribers_without_customer = get_subscriber_model().objects.filter(djstripe_customers=None)
    for subscriber in subscribers_without_customer:
        # use get_or_create in case of race conditions on large subscriber bases
        Customer.get_or_create(subscriber=subscriber)
        print("Created subscriber for {0}".format(subscriber.email))
|
def subscribe(self, subject, callback, queue=''):
    """Subscribe will express interest in the given subject. The subject can
    have wildcards (partial: *, full: >). Messages will be delivered to the
    associated callback.

    Args:
        subject (string): a string with the subject
        callback (function): callback to be called
        queue (string): optional queue group name for load-balanced delivery
    """
    # NOTE(review): 'connetion' is misspelled; presumably Subscription
    # declares the keyword with the same typo — confirm before renaming.
    s = Subscription(sid=self._next_sid, subject=subject, queue=queue, callback=callback, connetion=self)
    self._subscriptions[s.sid] = s
    # Announce the subscription to the server (NATS SUB protocol line).
    self._send('SUB %s %s %d' % (s.subject, s.queue, s.sid))
    self._next_sid += 1
    return s
|
def sky_to_image(shape_list, header):
    """Converts a `ShapeList` into shapes with coordinates in image coordinates

    Parameters
    ----------
    shape_list : `pyregion.ShapeList`
        The ShapeList to convert
    header : `~astropy.io.fits.Header`
        Specifies what WCS transformations to use.

    Yields
    ------
    shape, comment : Shape, str
        Shape with image coordinates and the associated comment

    Note
    ----
    The comments in the original `ShapeList` are unaltered
    """
    for shape, comment in shape_list:
        is_shape = isinstance(shape, Shape)
        if is_shape and shape.coord_format not in image_like_coordformats:
            # Sky-like coordinates: convert through the WCS in ``header``.
            converted = copy.copy(shape)
            converted.coord_list = convert_to_imagecoord(shape, header)
            converted.coord_format = "image"
            yield converted, comment
        elif is_shape and shape.coord_format == "physical":
            # Physical coordinates need header keywords (LTM/LTV) to convert.
            if header is None:
                raise RuntimeError("Physical coordinate is not known.")
            converted = copy.copy(shape)
            converted.coord_list = convert_physical_to_imagecoord(shape, header)
            converted.coord_format = "image"
            yield converted, comment
        else:
            # Already image-like (or not a Shape): pass through untouched.
            yield shape, comment
|
def do_up(self, arg):
    """u(p) [count]

    Move the current frame count (default one) levels up in the
    stack trace (to an older frame).
    """
    if self.curindex == 0:
        self.error('Oldest frame')
        return
    try:
        count = int(arg or 1)
    except ValueError:
        self.error('Invalid frame count (%s)' % arg)
        return
    # A negative count jumps straight to the oldest frame.
    target = 0 if count < 0 else max(0, self.curindex - count)
    self._select_frame(target)
|
def _parse_header_params(self, header_param_lines):
    '''Parse header lines of the form "Key: value" into a dict with
    lowercased keys and whitespace-stripped values.'''
    parsed = {}
    for raw_line in header_param_lines:
        if not raw_line.strip():
            # skip blank lines
            continue
        name, value = raw_line.split(':', 1)
        parsed[name.lower()] = value.strip()
    return parsed
|
def iter_files(root, exts=None, recursive=False):
    """Iterate over file paths within root filtered by specified extensions.

    :param compat.string_types root: Root folder to start collecting files
    :param iterable exts: Restrict results to given file extensions
    :param bool recursive: Whether to walk the complete directory tree
    :rtype collections.Iterable[str]: absolute file paths with given extensions
    """
    if exts is not None:
        # Normalize to a lowercase set for cheap membership tests.
        exts = set((x.lower() for x in exts))

    def matches(e):
        # No filter supplied means every extension matches.
        return (exts is None) or (e in exts)

    if recursive is False:
        for entry in compat.scandir(root):
            if compat.has_scandir:
                # scandir entries expose name/path and a cheap is_file().
                ext = splitext(entry.name)[-1].lstrip('.').lower()
                if entry.is_file() and matches(ext):
                    yield entry.path
            else:
                # Fallback: `entry` is a bare name from a listdir-style walk.
                ext = splitext(entry)[-1].lstrip('.').lower()
                # NOTE(review): isdir() gets the bare name, not
                # join(root, entry) — presumably only correct when the cwd
                # is `root`; confirm against compat.scandir's fallback.
                if not compat.isdir(entry) and matches(ext):
                    yield join(root, entry)
    else:
        for root, folders, files in compat.walk(root):
            for f in files:
                ext = splitext(f)[-1].lstrip('.').lower()
                if matches(ext):
                    yield join(root, f)
|
def print_inheritance(doc, stream):  # type: (List[Dict[Text, Any]], IO) -> None
    """Write a Grapviz inheritance graph for the supplied document."""
    stream.write("digraph {\n")
    for entry in doc:
        if entry["type"] != "record":
            continue
        name = shortname(entry["name"])
        label = name
        fields = entry.get("fields", [])
        if fields:
            # One bullet line per field inside the node label.
            field_list = "\\l* ".join(shortname(field["name"]) for field in fields)
            label += "\\n* %s\\l" % field_list
        # Abstract records render as ellipses, concrete ones as boxes.
        shape = "ellipse" if entry.get("abstract") else "box"
        stream.write("\"%s\" [shape=%s label=\"%s\"];\n" % (name, shape, label))
        if "extends" in entry:
            for target in aslist(entry["extends"]):
                stream.write("\"%s\" -> \"%s\";\n" % (shortname(target), name))
    stream.write("}\n")
|
def redo(self):
    """Redo the last action.

    This will call `redo()` on all controllers involved in this action.
    """
    controllers = self.forward()
    if controllers is None:
        ups = ()
    else:
        ups = tuple(controller.redo() for controller in controllers)
    # Optionally post-process the collected results.
    return ups if self.process_ups is None else self.process_ups(ups)
|
def get_lines(command):
    """Run a command and return lines of output

    :param str command: the command to run
    :returns: list of whitespace-stripped lines output by command
    """
    lines = []
    for raw_line in get_output(command).splitlines():
        lines.append(raw_line.strip().decode('utf-8'))
    return lines
|
def p_expr_LE_expr(p):
    """expr : expr LE expr"""
    # PLY grammar action (the docstring above IS the grammar rule; do not
    # edit it). Builds an 'LE' binary AST node from the two operand
    # expressions; the lambda supplies constant-folding semantics (x <= y).
    p[0] = make_binary(p.lineno(2), 'LE', p[1], p[3], lambda x, y: x <= y)
|
def add_method(self, pattern):
    """Decorator to add new dispatch functions.

    The decorated function is registered (bound to this dispatcher instance)
    under *pattern* and returned unchanged.
    """
    def register(f):
        # Freeze both the dispatcher instance and f in this closure so the
        # registered callable needs only the call-time arguments.
        def dispatch(pattern, *args, **kwargs):
            return f(self, pattern, *args, **kwargs)
        self.functions.append((dispatch, pattern))
        return f
    return register
|
def valuetype_class(self):
    """Return the valuetype class, if one is defined, or a built-in type if it isn't"""
    from ambry.valuetype import resolve_value_type
    # Prefer the explicit valuetype; fall back to the column's datatype.
    type_name = self.valuetype if self.valuetype else self.datatype
    return resolve_value_type(type_name)
|
def get_miller_index_from_site_indexes(self, site_ids, round_dp=4, verbose=True):
    """Get the Miller index of a plane from a set of sites indexes.

    A minimum of 3 sites are required. If more than 3 sites are given
    the best plane that minimises the distance to all points will be
    calculated.

    Args:
        site_ids (list of int): A list of site indexes to consider. A
            minimum of three site indexes are required. If more than three
            sites are provided, the best plane that minimises the distance
            to all sites will be calculated.
        round_dp (int, optional): The number of decimal places to round the
            miller index to.
        verbose (bool, optional): Whether to print warnings.

    Returns:
        (tuple): The Miller index.
    """
    # Work in fractional coordinates of the selected sites; the lattice
    # does the actual plane fitting.
    selected_coords = self.frac_coords[site_ids]
    return self.lattice.get_miller_index_from_coords(
        selected_coords, coords_are_cartesian=False, round_dp=round_dp, verbose=verbose)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.