code (string, 75 to 104k chars) | docstring (string, 1 to 46.9k chars)
|---|---|
def full_name(first_name, last_name, username, **extra):
"""Return full name or username."""
name = " ".join(n for n in [first_name, last_name] if n)
if not name:
return username
return name
|
Return full name or username.
|
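A minimal usage sketch for full_name, assuming the function above is importable; the extra keyword arguments are simply ignored:
print(full_name("Ada", "Lovelace", "ada42"))          # -> "Ada Lovelace"
print(full_name("", "", "ada42"))                     # no name parts -> falls back to "ada42"
print(full_name("Grace", None, "gh", source="ldap"))  # extra kwargs unused -> "Grace"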
def _are_scopes_sufficient(authorized_scopes, sufficient_scopes):
"""Check if a list of authorized scopes satisfies any set of sufficient scopes.
Args:
authorized_scopes: a list of strings, return value from oauth.get_authorized_scopes
sufficient_scopes: a set of sets of strings, return value from _process_scopes
"""
for sufficient_scope_set in sufficient_scopes:
if sufficient_scope_set.issubset(authorized_scopes):
return True
return False
|
Check if a list of authorized scopes satisfies any set of sufficient scopes.
Args:
authorized_scopes: a list of strings, return value from oauth.get_authorized_scopes
sufficient_scopes: a set of sets of strings, return value from _process_scopes
|
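A small sketch of the subset check, using frozensets for the sufficient scope sets (any one of them being fully covered is enough):
sufficient = {frozenset({"email"}), frozenset({"profile", "openid"})}
print(_are_scopes_sufficient(["email", "calendar"], sufficient))  # True: {"email"} is covered
print(_are_scopes_sufficient(["profile"], sufficient))            # False: no sufficient set is fully covered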
def targeted_conjugate_about(tensor: np.ndarray,
target: np.ndarray,
indices: Sequence[int],
conj_indices: Sequence[int] = None,
buffer: Optional[np.ndarray] = None,
out: Optional[np.ndarray] = None) -> np.ndarray:
r"""Conjugates the given tensor about the target tensor.
This method computes a target tensor conjugated by another tensor.
Here conjugate is used in the sense of conjugating by a matrix, i.e.
A conjugated about B is $A B A^\dagger$ where $\dagger$ represents the
conjugate transpose.
Abstractly this computes $A \cdot B \cdot A^\dagger$ where A and B are
multi-dimensional arrays, and instead of matrix multiplication $\cdot$
is a contraction between the given indices (indices for first $\cdot$,
conj_indices for second $\cdot$).
More specifically this computes
sum tensor_{i_0,...,i_{r-1},j_0,...,j_{r-1}}
* target_{k_0,...,k_{r-1},l_0,...,l_{r-1}}
* tensor_{m_0,...,m_{r-1},n_0,...,n_{r-1}}^*
where the sum is over indices where j_s = k_s and s is in `indices`
and l_s = m_s and s is in `conj_indices`.
Args:
tensor: The tensor that will be conjugated about the target tensor.
target: The tensor that will receive the conjugation.
indices: The indices which will be contracted between the tensor and
target.
conj_indices: The indices which will be contracted between the
complex conjugate of the tensor and the target. If this is None,
then these will be the values in indices plus half the number
of dimensions of the target (`ndim`). This is the most common case
and corresponds to the case where the target is an operator on
an n-dimensional tensor product space (here `n` would be `ndim`).
buffer: A buffer to store partial results in. If not specified or None,
a new buffer is used.
out: The buffer to store the results in. If not specified or None, a new
buffer is used. Must have the same shape as target.
Returns:
The result of the conjugation.
"""
conj_indices = conj_indices or [i + target.ndim // 2 for i in indices]
first_multiply = targeted_left_multiply(tensor, target, indices, out=buffer)
return targeted_left_multiply(np.conjugate(tensor),
first_multiply,
conj_indices,
out=out)
|
r"""Conjugates the given tensor about the target tensor.
This method computes a target tensor conjugated by another tensor.
Here conjugate is used in the sense of conjugating by a matrix, i.e.
A conjugated about B is $A B A^\dagger$ where $\dagger$ represents the
conjugate transpose.
Abstractly this computes $A \cdot B \cdot A^\dagger$ where A and B are
multi-dimensional arrays, and instead of matrix multiplication $\cdot$
is a contraction between the given indices (indices for first $\cdot$,
conj_indices for second $\cdot$).
More specifically this computes
sum tensor_{i_0,...,i_{r-1},j_0,...,j_{r-1}}
* target_{k_0,...,k_{r-1},l_0,...,l_{r-1}}
* tensor_{m_0,...,m_{r-1},n_0,...,n_{r-1}}^*
where the sum is over indices where j_s = k_s and s is in `indices`
and l_s = m_s and s is in `conj_indices`.
Args:
tensor: The tensor that will be conjugated about the target tensor.
target: The tensor that will receive the conjugation.
indices: The indices which will be contracted between the tensor and
target.
conj_indices: The indices which will be contracted between the
complex conjugate of the tensor and the target. If this is None,
then these will be the values in indices plus half the number
of dimensions of the target (`ndim`). This is the most common case
and corresponds to the case where the target is an operator on
an n-dimensional tensor product space (here `n` would be `ndim`).
buffer: A buffer to store partial results in. If not specified or None,
a new buffer is used.
out: The buffer to store the results in. If not specified or None, a new
buffer is used. Must have the same shape as target.
Returns:
The result of the conjugation.
|
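For intuition, in the plain 2-D case (indices=[0], conj_indices defaulting to [1]) the operation reduces to ordinary matrix conjugation A B A^dagger; a numpy-only sketch of that special case, not a call to the function itself:
import numpy as np
rng = np.random.default_rng(0)
A = rng.standard_normal((2, 2)) + 1j * rng.standard_normal((2, 2))
B = rng.standard_normal((2, 2)) + 1j * rng.standard_normal((2, 2))
expected = A @ B @ A.conj().T
# The same result written as a single contraction over the two "dots" in the docstring.
via_einsum = np.einsum('ij,jk,lk->il', A, B, np.conjugate(A))
assert np.allclose(expected, via_einsum)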
def extract_author_keywords(skw_db, ckw_db, fulltext):
"""Find out human defined keywords in a text string.
Searches for the string "Keywords:" and its declinations and matches the
following words.
:param skw_db: list single kw object
:param ckw_db: list of composite kw objects
:param fulltext: utf-8 string
:return: dictionary of matches in a format {
<keyword object>: [matched skw or ckw object, ....]
}
or empty {}
"""
akw = {}
for k, v in get_author_keywords(skw_db, ckw_db, fulltext).items():
akw[KeywordToken(k, type='author-kw')] = v
return akw
|
Find human-defined keywords in a text string.
Searches for the string "Keywords:" and its variants and matches the
following words.
:param skw_db: list single kw object
:param ckw_db: list of composite kw objects
:param fulltext: utf-8 string
:return: dictionary of matches in a format {
<keyword object>: [matched skw or ckw object, ....]
}
or empty {}
|
def recommend_from_interactions(
self, observed_items, k=10, exclude=None, items=None,
new_user_data=None, new_item_data=None,
exclude_known=True, diversity=0, random_seed=None,
verbose=True):
"""
Recommend the ``k`` highest scored items based on the
interactions given in ``observed_items``.
Parameters
----------
observed_items : SArray, SFrame, or list
A list/SArray of items to use to make recommendations, or
an SFrame of items and optionally ratings and/or other
interaction data. The model will then recommend the most
similar items to those given. If ``observed_items`` has a user
column, then it must be only one user, and the additional
interaction data stored in the model is also used to make
recommendations.
k : int, optional
The number of recommendations to generate.
items : SArray, SFrame, or list, optional
Restricts the items from which recommendations can be
made. ``items`` must be an SArray, list, or SFrame with a
single column containing items, and all recommendations
will be made from this pool of items. This can be used,
for example, to restrict the recommendations to items
within a particular category or genre. By default,
recommendations are made from all items present when the
model was trained.
new_user_data : SFrame, optional
``new_user_data`` may give additional user data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the user
data passed to ``create``.
new_item_data : SFrame, optional
``new_item_data`` may give additional item data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the item
data passed to ``create``.
exclude : SFrame, optional
An :class:`~turicreate.SFrame` of items or user / item
pairs. The column names must be equal to the user and
item columns of the main data, and it provides the model
with user/item pairs to exclude from the recommendations.
These user-item-pairs are always excluded from the
predictions, even if exclude_known is False.
exclude_known : bool, optional
By default, all user-item interactions previously seen in
the training data, or in any new data provided using
``new_observation_data``, are excluded from the
recommendations. Passing in ``exclude_known = False``
overrides this behavior.
diversity : non-negative float, optional
If given, then the recommend function attempts to choose a set
of `k` items that are both highly scored and different from
other items in that set. It does this by first retrieving
``k*(1+diversity)`` recommended items, then randomly
choosing a diverse set from these items. Suggested values
for diversity are between 1 and 3.
random_seed : int, optional
If diversity is larger than 0, then some randomness is used;
this controls the random seed to use for randomization. If
None, then it will be different each time.
verbose : bool, optional
If True, print the progress of generating recommendations.
Returns
-------
out : SFrame
An SFrame with the top-ranked items for each user. The
columns are: ``item_id``, *score*, and *rank*, where
``user_id`` and ``item_id`` match the user and item column
names specified at training time. The rank column is
between 1 and ``k`` and gives the relative score of that
item. The value of score depends on the method used for
recommendations.
observed_items: list, SArray, or SFrame
"""
column_types = self._get_data_schema()
user_id = self.user_id
item_id = self.item_id
user_type = column_types[user_id]
item_type = column_types[item_id]
if not hasattr(self, "_implicit_user_name"):
import hashlib
import time
self._implicit_user_name = None #("implicit-user-%s"
# % hashlib.md5("%0.20f" % time.time()).hexdigest()[:12])
if isinstance(observed_items, list):
observed_items = _SArray(observed_items, dtype = item_type)
if isinstance(observed_items, _SArray):
observed_items = _SFrame({self.item_id : observed_items})
if not isinstance(observed_items, _SFrame):
raise TypeError("observed_items must be a list or SArray of items, or an SFrame of items "
"and optionally ratings or other interaction information.")
# Don't modify the user's argument (if it's an SFrame).
observed_items = observed_items.copy()
# If a user id is present, then use that as the query user id
# (making sure there is only one present). If not, then use
# the local fake user id.
if user_id in observed_items.column_names():
main_user_value = observed_items[user_id][0]
if (observed_items[user_id] != main_user_value).any():
raise ValueError("To recommend items for more than one user, use `recommend()` and "
"supply new interactions using new_observation_data.")
users = _SArray([main_user_value], dtype = user_type)
else:
users = _SArray([self._implicit_user_name], dtype = user_type)
observed_items[user_id] = self._implicit_user_name
if observed_items[user_id].dtype != user_type:
observed_items[user_id] = observed_items[user_id].astype(user_type)
# Check the rest of the arguments.
if exclude is not None:
if isinstance(exclude, list):
exclude = _SArray(exclude, dtype = item_type)
if isinstance(exclude, _SArray):
exclude = _SFrame({item_id : exclude})
if user_id not in exclude.column_names():
exclude[user_id] = self._implicit_user_name
exclude[user_id] = exclude[user_id].astype(user_type)
recommendations = self.recommend(
users = users,
new_observation_data = observed_items,
k = k,
items = items,
new_user_data = new_user_data,
new_item_data = new_item_data,
exclude_known = exclude_known,
diversity = diversity,
random_seed = random_seed,
verbose = verbose)
del recommendations[user_id]
return recommendations
|
Recommend the ``k`` highest scored items based on the
interactions given in ``observed_items``.
Parameters
----------
observed_items : SArray, SFrame, or list
A list/SArray of items to use to make recommendations, or
an SFrame of items and optionally ratings and/or other
interaction data. The model will then recommend the most
similar items to those given. If ``observed_items`` has a user
column, then it must be only one user, and the additional
interaction data stored in the model is also used to make
recommendations.
k : int, optional
The number of recommendations to generate.
items : SArray, SFrame, or list, optional
Restricts the items from which recommendations can be
made. ``items`` must be an SArray, list, or SFrame with a
single column containing items, and all recommendations
will be made from this pool of items. This can be used,
for example, to restrict the recommendations to items
within a particular category or genre. By default,
recommendations are made from all items present when the
model was trained.
new_user_data : SFrame, optional
``new_user_data`` may give additional user data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the user
data passed to ``create``.
new_item_data : SFrame, optional
``new_item_data`` may give additional item data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the item
data passed to ``create``.
exclude : SFrame, optional
An :class:`~turicreate.SFrame` of items or user / item
pairs. The column names must be equal to the user and
item columns of the main data, and it provides the model
with user/item pairs to exclude from the recommendations.
These user-item-pairs are always excluded from the
predictions, even if exclude_known is False.
exclude_known : bool, optional
By default, all user-item interactions previously seen in
the training data, or in any new data provided using
``new_observation_data``, are excluded from the
recommendations. Passing in ``exclude_known = False``
overrides this behavior.
diversity : non-negative float, optional
If given, then the recommend function attempts to choose a set
of `k` items that are both highly scored and different from
other items in that set. It does this by first retrieving
``k*(1+diversity)`` recommended items, then randomly
choosing a diverse set from these items. Suggested values
for diversity are between 1 and 3.
random_seed : int, optional
If diversity is larger than 0, then some randomness is used;
this controls the random seed to use for randomization. If
None, then it will be different each time.
verbose : bool, optional
If True, print the progress of generating recommendations.
Returns
-------
out : SFrame
An SFrame with the top-ranked items for each user. The
columns are: ``item_id``, *score*, and *rank*, where
``user_id`` and ``item_id`` match the user and item column
names specified at training time. The rank column is
between 1 and ``k`` and gives the relative score of that
item. The value of score depends on the method used for
recommendations.
observed_items: list, SArray, or SFrame
|
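A hedged usage sketch, assuming a trained Turi Create recommender named model and item ids that match the training data (both hypothetical):
recs = model.recommend_from_interactions(['item_1', 'item_9'], k=5)  # 5 items similar to the basket
print(recs)  # SFrame with columns item_id, score, rank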
def boolean(self):
"""A mapping of this `StateVector` to a 2-D array containing all
binary bits as booleans, for each time point.
"""
try:
return self._boolean
except AttributeError:
nbits = len(self.bits)
boolean = numpy.zeros((self.size, nbits), dtype=bool)
for i, sample in enumerate(self.value):
boolean[i, :] = [int(sample) >> j & 1 for j in range(nbits)]
self._boolean = Array2D(boolean, name=self.name,
x0=self.x0, dx=self.dx, y0=0, dy=1)
return self.boolean
|
A mapping of this `StateVector` to a 2-D array containing all
binary bits as booleans, for each time point.
|
def get(cls, rkey):
"""Get image previously registered with key rkey.
If the key does not exist, raise StockImageException.
"""
if rkey in cls._cached:
logger.info('Resource %s is in cache.' % rkey)
return cls._cached[rkey]
if rkey in cls._stock:
img = cls._load_image(rkey)
return img
else:
raise StockImageException('StockImage: %s not registered.' % rkey)
|
Get image previously registered with key rkey.
If the key does not exist, raise StockImageException.
|
def limit(self, keys):
''' Remove all keys other than the keys specified.
'''
if not isinstance(keys, list) and not isinstance(keys, tuple):
keys = [keys]
remove_keys = [k for k in self.keys() if k not in keys]
for k in remove_keys:
self.pop(k)
|
Remove all keys other than the keys specified.
|
def extractUserStore(userAccount, extractionDestination, legacySiteAuthoritative=True):
"""
Move the SubStore for the given user account out of the given site store
completely. Place the user store's database directory into the given
destination directory.
@type userAccount: C{LoginAccount}
@type extractionDestination: C{FilePath}
@type legacySiteAuthoritative: C{bool}
@param legacySiteAuthoritative: before moving the user store, clear its
authentication information, copy that which is associated with it in the
site store rather than trusting its own. Currently this flag is necessary
(and defaults to true) because things like the ClickChronicle
password-changer gizmo still operate on the site store.
"""
if legacySiteAuthoritative:
# migrateDown() manages its own transactions, since it is copying items
# between two different stores.
userAccount.migrateDown()
av = userAccount.avatars
av.open().close()
def _():
# We're separately deleting several Items from the site store, then
# we're moving some files. If we cannot move the files, we don't want
# to delete the items.
# There is one unaccounted failure mode here: if the destination of the
# move is on a different mount point, the moveTo operation will fall
# back to a non-atomic copy; if all of the copying succeeds, but then
# part of the deletion of the source files fails, we will be left
# without a complete store in this site store's files directory, but
# the account Items will remain. This will cause odd errors on login
# and at other unpredictable times. The database is only one file, so
# we will either remove it all or none of it. Resolving this requires
# manual intervention currently: delete the substore's database
# directory and the account items (LoginAccount and LoginMethods)
# manually.
# However, this failure is extremely unlikely, as it would almost
# certainly indicate a misconfiguration of the permissions on the site
# store's files area. As described above, a failure of the call to
# os.rename(), if the platform's rename is atomic (which it generally
# is assumed to be) will not move any files and will cause a revert of
# the transaction which would have deleted the accompanying items.
av.deleteFromStore()
userAccount.deleteLoginMethods()
userAccount.deleteFromStore()
av.storepath.moveTo(extractionDestination)
userAccount.store.transact(_)
|
Move the SubStore for the given user account out of the given site store
completely. Place the user store's database directory into the given
destination directory.
@type userAccount: C{LoginAccount}
@type extractionDestination: C{FilePath}
@type legacySiteAuthoritative: C{bool}
@param legacySiteAuthoritative: before moving the user store, clear its
authentication information, copy that which is associated with it in the
site store rather than trusting its own. Currently this flag is necessary
(and defaults to true) because things like the ClickChronicle
password-changer gizmo still operate on the site store.
|
def thickness_hydrostatic_from_relative_humidity(pressure, temperature, relative_humidity,
**kwargs):
r"""Calculate the thickness of a layer given pressure, temperature and relative humidity.
Similar to ``thickness_hydrostatic``, this thickness calculation uses the pressure,
temperature, and relative humidity profiles via the hypsometric equation with virtual
temperature adjustment.
.. math:: Z_2 - Z_1 = -\frac{R_d}{g} \int_{p_1}^{p_2} T_v d\ln p,
which is based on Equation 3.24 in [Hobbs2006]_. Virtual temperature is calculated
from the profiles of temperature and relative humidity.
This assumes a hydrostatic atmosphere.
Layer bottom and depth specified in pressure.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
temperature : `pint.Quantity`
Atmospheric temperature profile
relative_humidity : `pint.Quantity`
Atmospheric relative humidity profile. The relative humidity is expressed as a
unitless ratio in the range [0, 1]. Can also pass a percentage if proper units are
attached.
bottom : `pint.Quantity`, optional
The bottom of the layer in pressure. Defaults to the first observation.
depth : `pint.Quantity`, optional
The depth of the layer in hPa. Defaults to the full profile if bottom is not given,
and 100 hPa if bottom is given.
Returns
-------
`pint.Quantity`
The thickness of the layer in meters.
See Also
--------
thickness_hydrostatic, pressure_to_height_std, virtual_temperature,
mixing_ratio_from_relative_humidity
"""
bottom = kwargs.pop('bottom', None)
depth = kwargs.pop('depth', None)
mixing = mixing_ratio_from_relative_humidity(relative_humidity, temperature, pressure)
return thickness_hydrostatic(pressure, temperature, mixing=mixing, bottom=bottom,
depth=depth)
|
r"""Calculate the thickness of a layer given pressure, temperature and relative humidity.
Similar to ``thickness_hydrostatic``, this thickness calculation uses the pressure,
temperature, and relative humidity profiles via the hypsometric equation with virtual
temperature adjustment.
.. math:: Z_2 - Z_1 = -\frac{R_d}{g} \int_{p_1}^{p_2} T_v d\ln p,
which is based on Equation 3.24 in [Hobbs2006]_. Virtual temperature is calculated
from the profiles of temperature and relative humidity.
This assumes a hydrostatic atmosphere.
Layer bottom and depth specified in pressure.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
temperature : `pint.Quantity`
Atmospheric temperature profile
relative_humidity : `pint.Quantity`
Atmospheric relative humidity profile. The relative humidity is expressed as a
unitless ratio in the range [0, 1]. Can also pass a percentage if proper units are
attached.
bottom : `pint.Quantity`, optional
The bottom of the layer in pressure. Defaults to the first observation.
depth : `pint.Quantity`, optional
The depth of the layer in hPa. Defaults to the full profile if bottom is not given,
and 100 hPa if bottom is given.
Returns
-------
`pint.Quantity`
The thickness of the layer in meters.
See Also
--------
thickness_hydrostatic, pressure_to_height_std, virtual_temperature,
mixing_ratio_from_relative_humidity
|
def tamper_file(filepath, mode='e', proba=0.03, block_proba=None, blocksize=65535, burst_length=None, header=None):
""" Randomly tamper a file's content """
if header and header > 0:
blocksize = header
tamper_count = 0 # total number of characters tampered in the file
total_size = 0 # total buffer size, NOT necessarily the total file size (depends if you set header or not)
with open(filepath, "r+b") as fh: # 'r+' allows to read AND overwrite characters. Else any other option won't allow both ('a+' read and append, 'w+' erases the file first then allow to read and write), and 'b' is just for binary because we can open any filetype.
if proba >= 1: proba = 1.0/os.fstat(fh.fileno()).st_size * proba # normalizing probability if it's an integer (ie: the number of characters to flip on average)
buf = fh.read(blocksize) # We process blocks by blocks because it's a lot faster (IO is still the slowest operation in any computing system)
while len(buf) > 0:
total_size += len(buf)
if not block_proba or (random.random() < block_proba): # If block tampering is enabled, process only if this block is selected by probability
pos2tamper = []
burst_remain = 0 # if burst is enabled and corruption probability is triggered, then we will here store the remaining number of characters to corrupt (the length is uniformly sampled over the range specified in arguments)
# Create the list of bits to tamper (it's a lot more efficient to precompute the list of characters to corrupt, and then modify in the file the characters all at once)
for i in xrange(len(buf)):
if burst_remain > 0 or (random.random() < proba): # Corruption probability: corrupt only if below the bit-flip proba
pos2tamper.append(i) # keep this character's position in the to-be-corrupted list
if burst_remain > 0: # if we're already in a burst, we minus one and continue onto the next character
burst_remain -= 1
elif burst_length: # else we're not in a burst, we create one (triggered by corruption probability: as soon as one character triggers the corruption probability, then we do a burst)
burst_remain = random.randint(burst_length[0], burst_length[1]) - 1 # if burst is enabled, then we randomly (uniformly) pick a random length for the burst between the range specified, and since we already tampered one character, we minus 1
# If there's any character to tamper in the list, we tamper the string
if pos2tamper:
tamper_count = tamper_count + len(pos2tamper)
#print("Before: %s" % buf)
buf = bytearray(buf) # Strings in Python are immutable, thus we need to convert to a bytearray
for pos in pos2tamper:
if mode == 'e' or mode == 'erasure': # Erase the character (set a null byte)
buf[pos] = 0
elif mode == 'n' or mode == 'noise': # Noising the character (set a random ASCII character)
buf[pos] = random.randint(0,255)
#print("After: %s" % buf)
# Overwriting the string into the file
prevpos = fh.tell() # need to store and place back the seek cursor because after the write, if it's the end of the file, the next read may be buggy (getting characters that are not part of the file)
fh.seek(fh.tell()-len(buf)) # Move the cursor at the beginning of the string we just read
fh.write(buf) # Overwrite it
fh.seek(prevpos) # Restore the previous position after the string
# If we only tamper the header, we stop here by setting the buffer to an empty string
if header and header > 0:
buf = ''
# Else we continue to the next data block
else:
# Load the next characters from file
buf = fh.read(blocksize)
return [tamper_count, total_size]
|
Randomly tamper a file's content
|
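A usage sketch with a hypothetical scratch file, assuming tamper_file is importable and runs under Python 2 (the body uses xrange):
with open('scratch.bin', 'wb') as f:
    f.write(b'\x00' * 4096)
tampered, seen = tamper_file('scratch.bin', mode='n', proba=0.01)  # noise on roughly 1% of the bytes
print('%d of %d bytes tampered' % (tampered, seen))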
def _permission_trees(permissions):
"""Get the cached permission tree, or build a new one if necessary."""
treecache = PermissionTreeCache()
cached = treecache.get()
if not cached:
tree = PermissionTreeBuilder()
for permission in permissions:
tree.insert(permission)
result = tree.serialize()
treecache.set(result)
return result
return cached
|
Get the cached permission tree, or build a new one if necessary.
|
def parse_response(self, response):
"""
Parse XMLRPC response
"""
parser, unmarshaller = self.getparser()
parser.feed(response.text.encode('utf-8'))
parser.close()
return unmarshaller.close()
|
Parse XMLRPC response
|
def update(self, state, tnow):
'''update the threat state'''
self.state = state
self.update_time = tnow
|
update the threat state
|
def get_maxsing(self,eigthresh=1.0e-5):
""" Get the number of singular components with a singular
value ratio greater than or equal to eigthresh
Parameters
----------
eigthresh : float
the ratio of the largest to smallest singular value
Returns
-------
int : int
number of singular components
"""
#sthresh =np.abs((self.s.x / self.s.x[0]) - eigthresh)
sthresh = self.s.x.flatten()/self.s.x[0]
ising = 0
for i,st in enumerate(sthresh):
if st > eigthresh:
ising += 1
#return max(1,i)
else:
break
#return max(1,np.argmin(sthresh))
return max(1,ising)
|
Get the number of singular components with a singular
value ratio greater than or equal to eigthresh
Parameters
----------
eigthresh : float
the ratio of the largest to smallest singular value
Returns
-------
int : int
number of singular components
|
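The loop simply counts leading singular values whose ratio to the largest one exceeds eigthresh; a standalone numpy sketch of the same counting logic on a plain array of singular values:
import numpy as np
s = np.array([10.0, 1.0, 1e-3, 1e-9])
eigthresh = 1.0e-5
ising = 0
for st in s / s[0]:           # ratios: 1, 0.1, 1e-4, 1e-10
    if st > eigthresh:
        ising += 1
    else:
        break
print(max(1, ising))          # -> 3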
def _check_params(self, *args, **kwargs):
""" 检查参数信息是否匹配 """
terms = kwargs.get('terms')
if not terms:
raise crawler.CrawlerParamsError('Terms needed!')
if not isinstance(terms, list):
terms = [terms]
if len(terms) != len(self.base['terms']):
# raise crawler.CrawlerParamsError('args length must match base[terms]')
log.debug('args length {} != {} must match base[terms]'.format(len(terms), len(self.base['terms'])))
|
Check whether the parameter information matches.
|
def map_template(category, template_list):
"""
Given a file path and an acceptable list of templates, return the
best-matching template's path relative to the configured template
directory.
Arguments:
category -- The path to map
template_list -- A template to look up (as a string), or a list of templates.
"""
if isinstance(template_list, str):
template_list = [template_list]
for template in template_list:
path = os.path.normpath(category)
while path is not None:
for extension in ['', '.html', '.htm', '.xml', '.json']:
candidate = os.path.join(path, template + extension)
file_path = os.path.join(config.template_folder, candidate)
if os.path.isfile(file_path):
return Template(template, candidate, file_path)
parent = os.path.dirname(path)
if parent != path:
path = parent
else:
path = None
|
Given a file path and an acceptable list of templates, return the
best-matching template's path relative to the configured template
directory.
Arguments:
category -- The path to map
template_list -- A template to look up (as a string), or a list of templates.
|
def update(self, key, value):
"""
:param key: a string
:param value: a string
"""
if not is_string(key):
raise Exception("Key must be string")
# if len(key) > 32:
# raise Exception("Max key length is 32")
if not is_string(value):
raise Exception("Value must be string")
# if value == '':
# return self.delete(key)
self.root_node = self._update_and_delete_storage(
self.root_node,
bin_to_nibbles(to_string(key)),
to_string(value))
self._update_root_hash()
|
:param key: a string
:param value: a string
|
def build_search(self):
"""
Construct the ``Search`` object.
"""
s = self.search()
s = self.query(s, self._query)
s = self.filter(s)
if self.fields:
s = self.highlight(s)
s = self.sort(s)
self.aggregate(s)
return s
|
Construct the ``Search`` object.
|
def _map_xpath_flags_to_re(expr: str, xpath_flags: str) -> Tuple[int, str]:
""" Map `5.6.2 Flags <https://www.w3.org/TR/xpath-functions-31/#flags>`_ to python
:param expr: match pattern
:param xpath_flags: xpath flags
:returns: python flags / modified match pattern
"""
python_flags: int = 0
modified_expr = expr
if xpath_flags is None:
xpath_flags = ""
if 's' in xpath_flags:
python_flags |= re.DOTALL
if 'm' in xpath_flags:
python_flags |= re.MULTILINE
if 'i' in xpath_flags:
python_flags |= re.IGNORECASE
if 'x' in xpath_flags:
modified_expr = re.sub(r'[\t\n\r ]|\[[^\]]*\]', _char_class_escape, modified_expr)
if 'q' in xpath_flags:
modified_expr = re.escape(modified_expr)
return python_flags, modified_expr
|
Map `5.6.2 Flags <https://www.w3.org/TR/xpath-functions-31/#flags>`_ to python
:param expr: match pattern
:param xpath_flags: xpath flags
:returns: python flags / modified match pattern
|
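A small sketch of the flag mapping, assuming the helper is in scope: 'i' maps to re.IGNORECASE and 'q' escapes the pattern so metacharacters match literally:
import re
flags, pattern = _map_xpath_flags_to_re("a.b", "iq")
assert flags & re.IGNORECASE
assert re.match(pattern, "A.B", flags)       # the escaped dot matches only a literal '.'
assert not re.match(pattern, "AxB", flags)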
def request(self, source, target):
"""Create or replace an entire configuration datastore with the contents of another complete
configuration datastore.
*source* is the name of the configuration datastore to use as the source of the copy operation or `config` element containing the configuration subtree to copy
*target* is the name of the configuration datastore to use as the destination of the copy operation
:seealso: :ref:`srctarget_params`"""
node = new_ele("copy-config")
node.append(util.datastore_or_url("target", target, self._assert))
try:
# datastore name or URL
node.append(util.datastore_or_url("source", source, self._assert))
except Exception:
# `source` with `config` element containing the configuration subtree to copy
node.append(validated_element(source, ("source", qualify("source"))))
return self._request(node)
|
Create or replace an entire configuration datastore with the contents of another complete
configuration datastore.
*source* is the name of the configuration datastore to use as the source of the copy operation or `config` element containing the configuration subtree to copy
*target* is the name of the configuration datastore to use as the destination of the copy operation
:seealso: :ref:`srctarget_params`
|
def getAllNodes(self):
'''
getAllNodes - Get every element
@return TagCollection<AdvancedTag>
'''
ret = TagCollection()
for rootNode in self.getRootNodes():
ret.append(rootNode)
ret += rootNode.getAllChildNodes()
return ret
|
getAllNodes - Get every element
@return TagCollection<AdvancedTag>
|
def copy_subtree(ret, element, msg):
'''copy_subtree
High-level api: Copy element as a subtree and put it as a child of ret.
Parameters
----------
element : `Element`
A node in a model tree.
msg : `str`
Message to be added.
ret : `Element`
A node in self.tree.
Returns
-------
`Element`
The copied subtree element that was appended to ret.
'''
sub_element = ModelDiff.process_attrib(deepcopy(element), msg)
ret.append(sub_element)
return sub_element
|
copy_subtree
High-level api: Copy element as a subtree and put it as a child of ret.
Parameters
----------
element : `Element`
A node in a model tree.
msg : `str`
Message to be added.
ret : `Element`
A node in self.tree.
Returns
-------
`Element`
The copied subtree element that was appended to ret.
|
def error(self):
"""**DEPRECATED**: Get the error if one occurred on the last operation.
This method is obsolete: all MongoDB write operations (insert, update,
remove, and so on) use the write concern ``w=1`` and report their
errors by default.
.. versionchanged:: 2.8
Deprecated.
"""
warnings.warn("Database.error() is deprecated",
DeprecationWarning, stacklevel=2)
error = self.command("getlasterror")
error_msg = error.get("err", "")
if error_msg is None:
return None
if error_msg.startswith("not master"):
# Reset primary server and request check, if another thread isn't
# doing so already.
primary = self.__client.primary
if primary:
self.__client._reset_server_and_request_check(primary)
return error
|
**DEPRECATED**: Get the error if one occurred on the last operation.
This method is obsolete: all MongoDB write operations (insert, update,
remove, and so on) use the write concern ``w=1`` and report their
errors by default.
.. versionchanged:: 2.8
Deprecated.
|
def install(self, release_id, upgrade=False):
"""Install target packages into a virtual environment.
If the virtual environment for the given release ID does not
exist on the remote system, it will be created. The virtual
environment will be created according to the standard Tunic
directory structure (see :doc:`design`).
If ``upgrade=True`` is passed, packages will be updated to the
most recent version if they are already installed in the virtual
environment.
:param str release_id: Timestamp-based identifier for this
deployment. If this ID corresponds to a virtual environment
that already exists, packages will be installed into this
environment.
:param bool upgrade: Should packages be updated if they are
already installed in the virtual environment.
:return: The results of running the installation command using
Fabric. Note that this return value is a decorated version
of a string that contains additional meta data about the
result of the command, in addition to the output generated.
:rtype: str
"""
release_path = os.path.join(self._releases, release_id)
if not self._runner.exists(release_path):
self._runner.run("{0} '{1}'".format(self._venv_path, release_path))
cmd = [os.path.join(release_path, 'bin', 'pip'), 'install']
if upgrade:
cmd.append('--upgrade')
sources = self._get_install_sources()
if sources:
cmd.append(sources)
cmd.extend("'{0}'".format(package) for package in self._packages)
return self._runner.run(' '.join(cmd))
|
Install target packages into a virtual environment.
If the virtual environment for the given release ID does not
exist on the remote system, it will be created. The virtual
environment will be created according to the standard Tunic
directory structure (see :doc:`design`).
If ``upgrade=True`` is passed, packages will be updated to the
most recent version if they are already installed in the virtual
environment.
:param str release_id: Timestamp-based identifier for this
deployment. If this ID corresponds to a virtual environment
that already exists, packages will be installed into this
environment.
:param bool upgrade: Should packages be updated if they are
already installed in the virtual environment.
:return: The results of running the installation command using
Fabric. Note that this return value is a decorated version
of a string that contains additional meta data about the
result of the command, in addition to the output generated.
:rtype: str
|
def load_obsdata(self, idx: int) -> None:
"""Load the next obs sequence value (of the given index)."""
if self._obs_ramflag:
self.obs[0] = self._obs_array[idx]
elif self._obs_diskflag:
raw = self._obs_file.read(8)
self.obs[0] = struct.unpack('d', raw)[0]  # unpack returns a 1-tuple; take the scalar
|
Load the next obs sequence value (of the given index).
|
def unwrap_aliases(data_type):
"""
Convenience method to unwrap all Alias(es) from around a DataType.
Args:
data_type (DataType): The target to unwrap.
Return:
Tuple[DataType, bool]: The underlying data type and a bool indicating
whether the input type had at least one alias layer.
"""
unwrapped_alias = False
while is_alias(data_type):
unwrapped_alias = True
data_type = data_type.data_type
return data_type, unwrapped_alias
|
Convenience method to unwrap all Alias(es) from around a DataType.
Args:
data_type (DataType): The target to unwrap.
Return:
Tuple[DataType, bool]: The underlying data type and a bool indicating
whether the input type had at least one alias layer.
|
def find_closing_braces(self, query):
"""Find the index of the closing braces for the opening braces
at the start of the query string. Note that first character
of input string must be an opening braces."""
if query[0] != '(':
raise Exception("Trying to find closing braces for no opening braces")
num_open_braces = 0
for i in range(len(query)):
c = query[i]
if c == '(':
num_open_braces += 1
elif c == ')':
num_open_braces -= 1
if num_open_braces == 0:
return i
raise Exception("No closing braces found")
|
Find the index of the closing brace for the opening brace
at the start of the query string. Note that the first character
of the input string must be an opening brace.
|
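An illustrative call, assuming parser is an instance of the (hypothetical) class defining this method; despite the wording it balances parentheses:
query = "(a AND (b OR c)) AND d"
idx = parser.find_closing_braces(query)
print(idx, query[idx])  # -> 15 ')'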
def p_unitary_op_3(self, program):
"""
unitary_op : id '(' ')' primary_list
"""
program[0] = node.CustomUnitary([program[1], program[4]])
self.verify_as_gate(program[1], program[4])
self.verify_reg_list(program[4], 'qreg')
self.verify_distinct([program[4]])
|
unitary_op : id '(' ')' primary_list
|
def get_default_org(self):
""" retrieve the name and configuration of the default org """
for org in self.list_orgs():
org_config = self.get_org(org)
if org_config.default:
return org, org_config
return None, None
|
retrieve the name and configuration of the default org
|
def describe_api_key(apiKey, region=None, key=None, keyid=None, profile=None):
'''
Gets info about the given api key
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.describe_api_key apigw_api_key
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
response = conn.get_api_key(apiKey=apiKey)
return {'apiKey': _convert_datetime_str(response)}
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
|
Gets info about the given api key
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.describe_api_key apigw_api_key
|
def get_all_compiler_versions():
"""Returns a sorted list of strings, like "70" or "80" or "9.0"
with most recent compiler version first.
"""
versions=[]
if is_windows:
if is_win64:
keyname = 'Software\\WoW6432Node\\Intel\\Compilers\\C++'
else:
keyname = 'Software\\Intel\\Compilers\\C++'
try:
k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,
keyname)
except SCons.Util.WinError:
# For version 13 or later, check for default instance UUID
if is_win64:
keyname = 'Software\\WoW6432Node\\Intel\\Suites'
else:
keyname = 'Software\\Intel\\Suites'
try:
k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,
keyname)
except SCons.Util.WinError:
return []
i = 0
versions = []
try:
while i < 100:
subkey = SCons.Util.RegEnumKey(k, i) # raises EnvironmentError
# Check that this refers to an existing dir.
# This is not 100% perfect but should catch common
# installation issues like when the compiler was installed
# and then the install directory deleted or moved (rather
# than uninstalling properly), so the registry values
# are still there.
if subkey == 'Defaults': # Ignore default instances
i = i + 1
continue
ok = False
for try_abi in ('IA32', 'IA32e', 'IA64', 'EM64T'):
try:
d = get_intel_registry_value('ProductDir', subkey, try_abi)
except MissingRegistryError:
continue # not found in reg, keep going
if os.path.exists(d): ok = True
if ok:
versions.append(subkey)
else:
try:
# Registry points to nonexistent dir. Ignore this
# version.
value = get_intel_registry_value('ProductDir', subkey, 'IA32')
except MissingRegistryError as e:
# Registry key is left dangling (potentially
# after uninstalling).
print("scons: *** Ignoring the registry key for the Intel compiler version %s.\n" \
"scons: *** It seems that the compiler was uninstalled and that the registry\n" \
"scons: *** was not cleaned up properly.\n" % subkey)
else:
print("scons: *** Ignoring "+str(value))
i = i + 1
except EnvironmentError:
# no more subkeys
pass
elif is_linux or is_mac:
for d in glob.glob('/opt/intel_cc_*'):
# Typical dir here is /opt/intel_cc_80.
m = re.search(r'cc_(.*)$', d)
if m:
versions.append(m.group(1))
for d in glob.glob('/opt/intel/cc*/*'):
# Typical dir here is /opt/intel/cc/9.0 for IA32,
# /opt/intel/cce/9.0 for EMT64 (AMD64)
m = re.search(r'([0-9][0-9.]*)$', d)
if m:
versions.append(m.group(1))
for d in glob.glob('/opt/intel/Compiler/*'):
# Typical dir here is /opt/intel/Compiler/11.1
m = re.search(r'([0-9][0-9.]*)$', d)
if m:
versions.append(m.group(1))
for d in glob.glob('/opt/intel/composerxe-*'):
# Typical dir here is /opt/intel/composerxe-2011.4.184
m = re.search(r'([0-9][0-9.]*)$', d)
if m:
versions.append(m.group(1))
for d in glob.glob('/opt/intel/composer_xe_*'):
# Typical dir here is /opt/intel/composer_xe_2011_sp1.11.344
# The _sp1 is useless, the installers are named 2011.9.x, 2011.10.x, 2011.11.x
m = re.search(r'([0-9]{0,4})(?:_sp\d*)?\.([0-9][0-9.]*)$', d)
if m:
versions.append("%s.%s"%(m.group(1), m.group(2)))
for d in glob.glob('/opt/intel/compilers_and_libraries_*'):
# JPA: For the new version of Intel compiler 2016.1.
m = re.search(r'([0-9]{0,4})(?:_sp\d*)?\.([0-9][0-9.]*)$', d)
if m:
versions.append("%s.%s"%(m.group(1), m.group(2)))
def keyfunc(str):
"""Given a dot-separated version string, return a tuple of ints representing it."""
return [int(x) for x in str.split('.')]
# split into ints, sort, then remove dups
return sorted(SCons.Util.unique(versions), key=keyfunc, reverse=True)
|
Returns a sorted list of strings, like "70" or "80" or "9.0"
with most recent compiler version first.
|
def create_window(self):
"""Create a QMainWindow instance containing this plugin."""
self.undocked_window = window = PluginWindow(self)
window.setAttribute(Qt.WA_DeleteOnClose)
icon = self.get_plugin_icon()
if is_text_string(icon):
icon = self.get_icon(icon)
window.setWindowIcon(icon)
window.setWindowTitle(self.get_plugin_title())
window.setCentralWidget(self)
window.resize(self.size())
self.refresh_plugin()
self.dockwidget.setFloating(False)
self.dockwidget.setVisible(False)
window.show()
|
Create a QMainWindow instance containing this plugin.
|
def BSearchFloor(a, x, lo=0, hi=None):
"""Returns highest i such as a[i] <= x, or -1 if x < all elements in a
So, if x is in between two elements in a, this function will return the
index of the lower element, hence "Floor".
Arguments:
a -- ordered numeric sequence
x -- element to search within a
lo -- lowest index to consider in search
hi -- highest index to consider in search"""
if len(a) == 0: return -1
hi = hi if hi is not None else len(a)
pos = bisect_left(a, x, lo, hi)
return pos - 1 if pos >= hi \
else (pos if x == a[pos] else (pos - 1 if pos > lo else -1))
|
Returns the highest i such that a[i] <= x, or -1 if x < all elements in a
So, if x is in between two elements in a, this function will return the
index of the lower element, hence "Floor".
Arguments:
a -- ordered numeric sequence
x -- element to search within a
lo -- lowest index to consider in search
hi -- highest index to consider in search
|
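A few illustrative calls; bisect_left comes from the standard bisect module, as in the function body:
a = [1, 3, 5, 7]
print(BSearchFloor(a, 4))   # -> 1 (a[1] = 3 is the largest element <= 4)
print(BSearchFloor(a, 7))   # -> 3 (exact match returns its own index)
print(BSearchFloor(a, 0))   # -> -1 (x is smaller than every element)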
def distance(args):
"""
%prog distance bedfile
Calculate distance between bed features. The output file is a list of
distances, which can be used to plot histogram, etc.
"""
from jcvi.utils.iter import pairwise
p = OptionParser(distance.__doc__)
p.add_option("--distmode", default="ss", choices=("ss", "ee"),
help="Distance mode between paired reads. ss is outer distance, " \
"ee is inner distance [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
sortedbedfile = sort([bedfile])
valid = total = 0
fp = open(sortedbedfile)
for a, b in pairwise(fp):
a = BedLine(a)
b = BedLine(b)
ar = (a.seqid, a.start, a.end, "+")
br = (b.seqid, b.start, b.end, "+")
dist, oo = range_distance(ar, br, distmode=opts.distmode)
total += 1
if dist > 0:
print(dist)
valid += 1
logging.debug("Total valid (> 0) distances: {0}.".\
format(percentage(valid, total)))
|
%prog distance bedfile
Calculate distance between bed features. The output file is a list of
distances, which can be used to plot histogram, etc.
|
def addSpatialNoise(self, sequence, amount):
"""
Add spatial noise to each pattern in the sequence.
@param sequence (list) Sequence
@param amount (float) Amount of spatial noise
@return (list) Sequence with spatial noise
"""
newSequence = []
for pattern in sequence:
if pattern is not None:
pattern = self.patternMachine.addNoise(pattern, amount)
newSequence.append(pattern)
return newSequence
|
Add spatial noise to each pattern in the sequence.
@param sequence (list) Sequence
@param amount (float) Amount of spatial noise
@return (list) Sequence with spatial noise
|
def get_md_header(header_text_line: str,
header_duplicate_counter: dict,
keep_header_levels: int = 3,
parser: str = 'github',
no_links: bool = False) -> dict:
r"""Build a data structure with the elements needed to create a TOC line.
:parameter header_text_line: a single markdown line that needs to be
transformed into a TOC line.
:parameter header_duplicate_counter: a data structure that contains the
number of occurrencies of each header anchor link. This is used to
avoid duplicate anchor links and it is meaningful only for certain
values of parser.
:parameter keep_header_levels: the maximum level of headers to be
considered as such when building the table of contents.
Defaults to ``3``.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type header_text_line: str
:type header_duplicate_counter: dict
:type keep_header_levels: int
:type parser: str
:returns: None if the input line does not correspond to one of the
designated cases or a data structure containing the necessary
components to create a table of contents line, otherwise.
:rtype: dict
:raises: a built-in exception.
.. note::
This works like a wrapper to other functions.
"""
result = get_atx_heading(header_text_line, keep_header_levels, parser,
no_links)
if result is None:
return result
else:
header_type, header_text_trimmed = result
header = {
'type':
header_type,
'text_original':
header_text_trimmed,
'text_anchor_link':
build_anchor_link(header_text_trimmed, header_duplicate_counter,
parser)
}
return header
|
r"""Build a data structure with the elements needed to create a TOC line.
:parameter header_text_line: a single markdown line that needs to be
transformed into a TOC line.
:parameter header_duplicate_counter: a data structure that contains the
number of occurrences of each header anchor link. This is used to
avoid duplicate anchor links and it is meaningful only for certain
values of parser.
:parameter keep_header_levels: the maximum level of headers to be
considered as such when building the table of contents.
Defaults to ``3``.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type header_text_line: str
:type header_duplicate_counter: dict
:type keep_header_levels: int
:type parser: str
:returns: None if the input line does not correspond to one of the
designated cases or a data structure containing the necessary
components to create a table of contents line, otherwise.
:rtype: dict
:raises: a built-in exception.
.. note::
This works like a wrapper to other functions.
|
def get_fields(schema, exclude_dump_only=False):
"""Return fields from schema
:param Schema schema: A marshmallow Schema instance or a class object
:param bool exclude_dump_only: whether to filter fields in Meta.dump_only
:rtype: dict of field name / field object pairs
"""
if hasattr(schema, "fields"):
fields = schema.fields
elif hasattr(schema, "_declared_fields"):
fields = copy.deepcopy(schema._declared_fields)
else:
raise ValueError(
"{!r} doesn't have either `fields` or `_declared_fields`.".format(schema)
)
Meta = getattr(schema, "Meta", None)
warn_if_fields_defined_in_meta(fields, Meta)
return filter_excluded_fields(fields, Meta, exclude_dump_only)
|
Return fields from schema
:param Schema schema: A marshmallow Schema instance or a class object
:param bool exclude_dump_only: whether to filter fields in Meta.dump_only
:rtype: dict of field name / field object pairs
|
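A hedged sketch with a small marshmallow schema, assuming marshmallow is installed; an instance is passed so the helper takes the .fields branch:
from marshmallow import Schema, fields

class PetSchema(Schema):
    name = fields.Str(required=True)
    age = fields.Int()

pet_fields = get_fields(PetSchema())
print(sorted(pet_fields))   # -> ['age', 'name']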
def wrap_call(self, call_cmd):
"""
"wraps" the call_cmd so it can be executed by subprocess.call (and related flavors) as "args" argument
:param call_cmd: original args like argument (string or sequence)
:return: a sequence with the original command "executed" under trickle
"""
if isinstance(call_cmd, basestring): # FIXME python 3 unsafe
call_cmd = [call_cmd]
return [self._trickle_cmd, "-s"] + self._settings.to_argument_list() + list(call_cmd)
|
"wraps" the call_cmd so it can be executed by subprocess.call (and related flavors) as "args" argument
:param call_cmd: original args like argument (string or sequence)
:return: a sequence with the original command "executed" under trickle
|
def do_label(self):
"""
Create labels for the x and y axes, the title, and the suptitle
"""
outputdict = self.outputdict
xlabel_options = self.kwargs.get("xlabel_options", {})
self.subplot.set_xlabel(
self.kwargs.get("xlabel", "").format(**outputdict),
**xlabel_options)
ylabel_options = self.kwargs.get("ylabel_options", {})
self.subplot.set_ylabel(
self.kwargs.get("ylabel", "").format(**outputdict),
**ylabel_options)
suptitle = self.kwargs.get("suptitle", None)
if suptitle is not None:
suptitle_options = self.kwargs.get("suptitle_options", {})
self.figure.suptitle(
suptitle.format(**outputdict),
fontsize=int(self.kwargs.get("suptitle_fontsize", 15)),
**suptitle_options)
title = self.kwargs.get("title", None)
if title is not None:
title_options = self.kwargs.get("title_options", {})
self.subplot.set_title(
title.format(**outputdict),
fontsize=int(self.kwargs.get("title_fontsize", 12)),
**title_options)
xlim = self.kwargs.get("xlim", None)
ylim = self.kwargs.get("ylim", None)
if xlim is not None:
self.subplot.set_xlim(xlim)
if ylim is not None:
self.subplot.set_ylim(ylim)
# axis format
self.subplot.ticklabel_format(
style="sci", useOffset=False,
scilimits=self.kwargs.get("scilimits", (-4, 4))
)
return self
|
Create labels for the x and y axes, the title, and the suptitle
|
def hybrid_forward(self, F, a, b):
"""
Forward of Decomposable Attention layer
"""
# a.shape = [B, L1, H]
# b.shape = [B, L2, H]
# extract features
tilde_a = self.f(a) # shape = [B, L1, H]
tilde_b = self.f(b) # shape = [B, L2, H]
# attention
# e.shape = [B, L1, L2]
e = F.batch_dot(tilde_a, tilde_b, transpose_b=True)
# beta: b align to a, [B, L1, H]
beta = F.batch_dot(e.softmax(), tilde_b)
# alpha: a align to b, [B, L2, H]
alpha = F.batch_dot(e.transpose([0, 2, 1]).softmax(), tilde_a)
# compare
feature1 = self.g(F.concat(tilde_a, beta, dim=2))
feature2 = self.g(F.concat(tilde_b, alpha, dim=2))
feature1 = feature1.sum(axis=1)
feature2 = feature2.sum(axis=1)
yhat = self.h(F.concat(feature1, feature2, dim=1))
return yhat
|
Forward of Decomposable Attention layer
|
def main():
'''Main routine.'''
# validate command line arguments
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--name', '-n', required=True, action='store', help='Name')
arg_parser.add_argument('--rgname', '-g', required=True, action='store',
help='Resource Group Name')
arg_parser.add_argument('--location', '-l', required=True, action='store',
help='Location, e.g. eastus')
arg_parser.add_argument('--verbose', '-v', action='store_true', default=False,
help='Print operational details')
args = arg_parser.parse_args()
name = args.name
rgname = args.rgname
location = args.location
# Load Azure app defaults
try:
with open('azurermconfig.json') as config_file:
config_data = json.load(config_file)
except FileNotFoundError:
sys.exit('Error: Expecting azurermconfig.json in current folder')
tenant_id = config_data['tenantId']
app_id = config_data['appId']
app_secret = config_data['appSecret']
subscription_id = config_data['subscriptionId']
# authenticate
access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
# initialize haikunator
hkn = Haikunator()
# create NSG
nsg_name = name + 'nsg'
print('Creating NSG: ' + nsg_name)
rmreturn = azurerm.create_nsg(access_token, subscription_id, rgname, nsg_name, location)
nsg_id = rmreturn.json()['id']
print('nsg_id = ' + nsg_id)
# create NSG rule
nsg_rule = 'ssh'
print('Creating NSG rule: ' + nsg_rule)
rmreturn = azurerm.create_nsg_rule(access_token, subscription_id, rgname, nsg_name, nsg_rule,
description='ssh rule', destination_range='22')
print(rmreturn)
print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': ')))
# create VNET
vnetname = name + 'vnet'
print('Creating VNet: ' + vnetname)
rmreturn = azurerm.create_vnet(access_token, subscription_id, rgname, vnetname, location,
nsg_id=nsg_id)
print(rmreturn)
# print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': ')))
subnet_id = rmreturn.json()['properties']['subnets'][0]['id']
print('subnet_id = ' + subnet_id)
# create public IP address
public_ip_name = name + 'ip'
dns_label = name + 'ip'
print('Creating public IP address: ' + public_ip_name)
rmreturn = azurerm.create_public_ip(access_token, subscription_id, rgname, public_ip_name,
dns_label, location)
print(rmreturn)
ip_id = rmreturn.json()['id']
print('ip_id = ' + ip_id)
print('Waiting for IP provisioning..')
waiting = True
while waiting:
ipa = azurerm.get_public_ip(access_token, subscription_id, rgname, public_ip_name)
if ipa['properties']['provisioningState'] == 'Succeeded':
waiting = False
time.sleep(1)
# create NIC
nic_name = name + 'nic'
print('Creating NIC: ' + nic_name)
rmreturn = azurerm.create_nic(access_token, subscription_id, rgname, nic_name, ip_id,
subnet_id, location)
#print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': ')))
nic_id = rmreturn.json()['id']
print('Waiting for NIC provisioning..')
waiting = True
while waiting:
nic = azurerm.get_nic(access_token, subscription_id, rgname, nic_name)
if nic['properties']['provisioningState'] == 'Succeeded':
waiting = False
time.sleep(1)
# create VM
vm_name = name
vm_size = 'Standard_D1'
publisher = 'CoreOS'
offer = 'CoreOS'
sku = 'Stable'
version = 'latest'
username = 'azure'
password = hkn.haikunate(delimiter=',') # creates random password
print('password = ' + password)
print('Creating VM: ' + vm_name)
rmreturn = azurerm.create_vm(access_token, subscription_id, rgname, vm_name, vm_size,
publisher, offer, sku, version, nic_id, location,
username=username, password=password)
print(rmreturn)
print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': ')))
|
Main routine.
|
def process_request_params(
self,
params: Sequence[ExtensionParameter],
accepted_extensions: Sequence[Extension],
) -> Tuple[List[ExtensionParameter], Extension]:
"""
Process request parameters received from the client.
``params`` is a list of (name, value) pairs.
``accepted_extensions`` is a list of previously accepted extensions.
To accept the offer, return a 2-tuple containing:
- response parameters: a list of (name, value) pairs
- an extension: an instance of a subclass of :class:`Extension`
To reject the offer, raise
:exc:`~websockets.exceptions.NegotiationError`.
"""
|
Process request parameters received from the client.
``params`` is a list of (name, value) pairs.
``accepted_extensions`` is a list of previously accepted extensions.
To accept the offer, return a 2-tuple containing:
- response parameters: a list of (name, value) pairs
- an extension: an instance of a subclass of :class:`Extension`
To reject the offer, raise
:exc:`~websockets.exceptions.NegotiationError`.
|
def fix_geometry(self, isophote):
"""
Fix the geometry of a problematic isophote to be identical to
the input isophote.
This method should be called when the fitting goes berserk and
delivers an isophote with bad geometry, such as ellipticity > 1
or another meaningless situation. This is not a problem in
itself when fitting any given isophote, but will create an error
when the affected isophote is used as starting guess for the
next fit.
Parameters
----------
isophote : `~photutils.isophote.Isophote` instance
The isophote from which to take the geometry information.
"""
self.sample.geometry.eps = isophote.sample.geometry.eps
self.sample.geometry.pa = isophote.sample.geometry.pa
self.sample.geometry.x0 = isophote.sample.geometry.x0
self.sample.geometry.y0 = isophote.sample.geometry.y0
|
Fix the geometry of a problematic isophote to be identical to
the input isophote.
This method should be called when the fitting goes berserk and
delivers an isophote with bad geometry, such as ellipticity > 1
or another meaningless situation. This is not a problem in
itself when fitting any given isophote, but will create an error
when the affected isophote is used as starting guess for the
next fit.
Parameters
----------
isophote : `~photutils.isophote.Isophote` instance
The isophote from which to take the geometry information.
|
def main(args=None):
"""The main routine."""
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(
description='Makes UCL PHAS results better')
parser.add_argument('--input', '-i',
type=str, help="csv file to import")
parser.add_argument('--format', '-f', type=str,
help="reformats results by module and exports it to file specified")
parser.add_argument('--plot', '-p', action='store_true',
help="plot the module results")
parser.add_argument('--exportplots', '-ep', type=str,
help="export all plots to /path/you/want/")
parser.add_argument('--showplots', '-sp',
action='store_true', help="show all plots")
parser.add_argument(
'--my', '-m', action="store_true", help="returns your weighted average for the year")
parser.add_argument('--year', '-y', help="specify your year")
parser.add_argument('--rank', '-r', action='store_true',
help="returns your rank in the year")
parser.add_argument('--candidate', '-c',
help="specify your candidate number")
args = parser.parse_args()
# end argparse
resultr.main(args)
|
The main routine.
|
def check_arguments(args, parser):
"""Check arguments passed by user that are not checked by argparse itself."""
if args.asm_block not in ['auto', 'manual']:
try:
args.asm_block = int(args.asm_block)
except ValueError:
parser.error('--asm-block can only be "auto", "manual" or an integer')
# Set default unit depending on performance model requested
if not args.unit:
if 'Roofline' in args.pmodel or 'RooflineIACA' in args.pmodel:
args.unit = 'FLOP/s'
else:
args.unit = 'cy/CL'
|
Check arguments passed by user that are not checked by argparse itself.
|
def get_chart(self, relation=None, index=0, limit=10, **kwargs):
"""
Get chart
:returns: a list of :class:`~deezer.resources.Resource` objects.
"""
return self.get_object(
"chart", object_id="0", relation=relation, parent="chart", **kwargs
)
|
Get chart
:returns: a list of :class:`~deezer.resources.Resource` objects.
|
def density_2d(self, x, y, Rs, rho0, r_core, center_x=0, center_y=0):
"""
projected two-dimensional NFW profile (kappa*Sigma_crit)
:param R: radius of interest
:type R: float/numpy array
:param Rs: scale radius
:type Rs: float
:param rho0: density normalization (characteristic density)
:type rho0: float
:param r_core: core radius of the profile
:type r_core: float>0
:return: Epsilon(R) projected density at radius R
"""
x_ = x - center_x
y_ = y - center_y
R = np.sqrt(x_ ** 2 + y_ ** 2)
b = r_core * Rs ** -1
x = R * Rs ** -1
Fx = self._F(x, b)
return 2 * rho0 * Rs * Fx
|
projected two-dimensional NFW profile (kappa*Sigma_crit)
:param R: radius of interest
:type R: float/numpy array
:param Rs: scale radius
:type Rs: float
:param rho0: density normalization (characteristic density)
:type rho0: float
:param r_core: core radius of the profile
:type r_core: float>0
:return: Epsilon(R) projected density at radius R
|
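The return value implements Sigma(R) = 2 * rho0 * Rs * F(x, b) with x = R/Rs and b = r_core/Rs. A numeric sketch with a placeholder _F (the real cored-NFW integral is not reproduced here):

import numpy as np

def _F_stub(x, b):
    # Hypothetical stand-in for the class's _F(x, b); not the real profile.
    return 1.0 / (x ** 2 + b ** 2)

Rs, rho0, r_core = 10.0, 1.0, 2.0
x_, y_ = 3.0, 4.0                       # offsets from (center_x, center_y)
R = np.sqrt(x_ ** 2 + y_ ** 2)          # projected radius = 5.0
print(2 * rho0 * Rs * _F_stub(R / Rs, r_core / Rs))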
def merged(self, other):
"""Returns a new ParseNode whose type is this node's type, and whose children are all the
children from this node and the other whose length is not 0.
"""
children = [c for c in itertools.chain(self.children, other.children) if len(c) > 0]
# NOTE: Only terminals should have ignored text attached to them, and terminals shouldn't be
# merged (probably) so it shouldn't be necessary to copy `ignored` -- it should always
# be None. But, we'll go ahead and copy it over anyway, recognizing that other's
# ignored text will be lost.
return ParseNode(self.node_type,
children=children,
consumed=self.consumed + other.consumed,
ignored=self.ignored)
|
Returns a new ParseNode whose type is this node's type, and whose children are all the
children from this node and the other whose length is not 0.
|
def last_bed_temp(self):
"""Return avg bed temperature for last session."""
try:
bedtemps = self.intervals[1]['timeseries']['tempBedC']
except KeyError:
return None
if not bedtemps:
return None
return sum(temp[1] for temp in bedtemps) / len(bedtemps)
|
Return avg bed temperature for last session.
|
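The same averaging in isolation, over hypothetical (timestamp, value) pairs:

temps = [("2020-01-01T00:00", 26.5), ("2020-01-01T01:00", 27.1)]
avg = sum(value for _, value in temps) / len(temps) if temps else None
print(avg)  # 26.8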
def toggle_plain_text(self, checked):
"""Toggle plain text docstring"""
if checked:
self.docstring = checked
self.switch_to_plain_text()
self.force_refresh()
self.set_option('rich_mode', not checked)
|
Toggle plain text docstring
|
def _outputMessages(self, warnings, node):
"""
Map pycodestyle results to messages in pylint, then output them.
@param warnings: a list of tuples of
(line number, offset, pycodestyle message id, message text)
"""
if not warnings:
# No warnings were found
return
for warning in warnings:
linenum, offset, msgidInPyCodeStyle, text = warning
if text.startswith(msgidInPyCodeStyle):
# If the PyCodeStyle code is at the start of the text, trim it out
text = text[len(msgidInPyCodeStyle) + 1:]
if msgidInPyCodeStyle in self.mapPyCodeStyleMessages:
msgid, patternArguments = self.mapPyCodeStyleMessages[msgidInPyCodeStyle]
if (not self.pycodestyleEnabled and
msgid in self.standardPyCodeStyleMessages):
continue
arguments = []
if patternArguments:
matchResult = re.search(patternArguments, text)
if matchResult:
arguments = matchResult.groups()
self.add_message(msgid, line=linenum, args=arguments, node=node)
|
Map pycodestyle results to messages in pylint, then output them.
@param warnings: a list of tuples of
(line number, offset, pycodestyle message id, message text)
|
def init_states(batch_size, num_lstm_layer, num_hidden):
"""
Returns name and shape of init states of LSTM network
Parameters
----------
batch_size: int
num_lstm_layer: int
num_hidden: int
Returns
-------
list of tuple of str and tuple of int and int
"""
init_c = [('l%d_init_c' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
init_h = [('l%d_init_h' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
return init_c + init_h
|
Returns name and shape of init states of LSTM network
Parameters
----------
batch_size: int
num_lstm_layer: int
num_hidden: int
Returns
-------
list of tuple of str and tuple of int and int
|
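Assuming init_states is in scope, a quick check of the returned names and shapes:

print(init_states(batch_size=32, num_lstm_layer=2, num_hidden=128))
# [('l0_init_c', (32, 128)), ('l1_init_c', (32, 128)),
#  ('l0_init_h', (32, 128)), ('l1_init_h', (32, 128))]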
def authenticate_external(self, auth_params):
"""Verify credentials using the external auth library.
in auth_params of type str
The auth parameters, credentials, etc.
out result of type str
The authentication result.
"""
if not isinstance(auth_params, list):
raise TypeError("auth_params can only be an instance of type list")
for a in auth_params[:10]:
if not isinstance(a, basestring):
raise TypeError(
"array can only contain objects of type basestring")
result = self._call("authenticateExternal",
in_p=[auth_params])
return result
|
Verify credentials using the external auth library.
in auth_params of type str
The auth parameters, credentials, etc.
out result of type str
The authentication result.
|
def download(ctx):
"""Download code of the current project."""
user, project_name = get_project_or_local(ctx.obj.get('project'))
try:
PolyaxonClient().project.download_repo(user, project_name)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not download code for project `{}`.'.format(project_name))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
Printer.print_success('Files downloaded.')
|
Download code of the current project.
|
def ProcessHuntFlowLog(flow_obj, log_msg):
"""Processes log message from a given hunt-induced flow."""
if not hunt.IsLegacyHunt(flow_obj.parent_hunt_id):
return
hunt_urn = rdfvalue.RDFURN("hunts").Add(flow_obj.parent_hunt_id)
flow_urn = hunt_urn.Add(flow_obj.flow_id)
log_entry = rdf_flows.FlowLog(
client_id=flow_obj.client_id,
urn=flow_urn,
flow_name=flow_obj.flow_class_name,
log_message=log_msg)
with data_store.DB.GetMutationPool() as pool:
grr_collections.LogCollection.StaticAdd(
hunt_urn.Add("Logs"), log_entry, mutation_pool=pool)
|
Processes log message from a given hunt-induced flow.
|
def to_api(in_dict, int_keys=None, date_keys=None, bool_keys=None):
"""Extends a given object for API Production."""
# Cast all int_keys to int()
if int_keys:
for in_key in int_keys:
if (in_key in in_dict) and (in_dict.get(in_key, None) is not None):
in_dict[in_key] = int(in_dict[in_key])
# Cast all date_keys to datetime.isoformat
if date_keys:
for in_key in date_keys:
if (in_key in in_dict) and (in_dict.get(in_key, None) is not None):
_from = in_dict[in_key]
if isinstance(_from, basestring):
dtime = parse_datetime(_from)
elif isinstance(_from, datetime):
dtime = _from
in_dict[in_key] = dtime.isoformat()
elif (in_key in in_dict) and in_dict.get(in_key, None) is None:
del in_dict[in_key]
# Remove all Nones (iterate over a copy: deleting from a dict while
# iterating it raises RuntimeError on Python 3)
for k, v in list(in_dict.items()):
if v is None:
del in_dict[k]
return in_dict
|
Extends a given object for API Production.
|
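A self-contained Python 3 sketch of the same pattern (str stands in for basestring, and datetime.fromisoformat replaces the module's parse_datetime):

from datetime import datetime

def to_api_sketch(d, int_keys=(), date_keys=()):
    for key in int_keys:
        if d.get(key) is not None:
            d[key] = int(d[key])
    for key in date_keys:
        value = d.get(key)
        if isinstance(value, str):
            value = datetime.fromisoformat(value)
        if isinstance(value, datetime):
            d[key] = value.isoformat()
    # Iterate over a copy so deleting keys during iteration is safe.
    for key, value in list(d.items()):
        if value is None:
            del d[key]
    return d

print(to_api_sketch({"count": "3", "when": "2020-01-02", "note": None},
                    int_keys=["count"], date_keys=["when"]))
# {'count': 3, 'when': '2020-01-02T00:00:00'}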
def organizations(self, organization, include=None):
"""
Retrieve the tickets for this organization.
:param include: list of objects to sideload. `Side-loading API Docs
<https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
:param organization: Organization object or id
"""
return self._query_zendesk(self.endpoint.organizations, 'ticket', id=organization, include=include)
|
Retrieve the tickets for this organization.
:param include: list of objects to sideload. `Side-loading API Docs
<https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
:param organization: Organization object or id
|
def derive_fields(self):
"""
Derives our fields. We first default to using our 'fields' variable if available,
otherwise we figure it out from our object.
"""
if self.fields:
return list(self.fields)
else:
fields = []
for field in self.object._meta.fields:
fields.append(field.name)
# only exclude? then remove those items there
exclude = self.derive_exclude()
# remove any excluded fields
fields = [field for field in fields if field not in exclude]
return fields
|
Derives our fields. We first default to using our 'fields' variable if available,
otherwise we figure it out from our object.
|
def _batched_op_msg_compressed(
operation, command, docs, check_keys, ack, opts, ctx):
"""Create the next batched insert, update, or delete operation
with OP_MSG, compressed.
"""
data, to_send = _encode_batched_op_msg(
operation, command, docs, check_keys, ack, opts, ctx)
request_id, msg = _compress(
2013,
data,
ctx.sock_info.compression_context)
return request_id, msg, to_send
|
Create the next batched insert, update, or delete operation
with OP_MSG, compressed.
|
def diff_texts(left, right, diff_options=None, formatter=None):
"""Takes two Unicode strings containing XML"""
return _diff(etree.fromstring, left, right,
diff_options=diff_options, formatter=formatter)
|
Takes two Unicode strings containing XML
|
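A hedged usage sketch, assuming this is xmldiff's diff_texts (the exact action objects returned depend on the library version):

left = "<root><a>1</a></root>"
right = "<root><a>2</a></root>"
print(diff_texts(left, right))
# e.g. [UpdateTextIn(node='/root/a[1]', text='2')]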
def copy(self, copyPrimaryKey=False, copyValues=False):
'''
copy - Copies this object.
@param copyPrimaryKey <bool> default False - If True, any changes to the copy will save over-top the existing entry in Redis.
If False, only the data is copied, and nothing is saved.
@param copyValues <bool> default False - If True, every field value on this object will be explicitly copied. If False,
an object will be created with the same values, and depending on the type may share the same reference.
This is the difference between a copy and a deepcopy.
@return <IndexedRedisModel> - Copy of this object, per above
If you need a copy that IS linked, @see IndexedRedisModel.copy
'''
cpy = self.__class__(**self.asDict(copyPrimaryKey, forStorage=False))
if copyValues is True:
for fieldName in cpy.FIELDS:
setattr(cpy, fieldName, copy.deepcopy(getattr(cpy, fieldName)))
return cpy
|
copy - Copies this object.
@param copyPrimaryKey <bool> default False - If True, any changes to the copy will save over-top the existing entry in Redis.
If False, only the data is copied, and nothing is saved.
@param copyValues <bool> default False - If True, every field value on this object will be explicitly copied. If False,
an object will be created with the same values, and depending on the type may share the same reference.
This is the difference between a copy and a deepcopy.
@return <IndexedRedisModel> - Copy of this object, per above
If you need a copy that IS linked, @see IndexedRedisModel.copy
|
def getLibs(self, explicit_only=False):
''' Return a dictionary of libraries to compile: {"dirname":"libname"},
this is used when automatically generating CMakeLists.
If explicit_only is not set, then in the absence of both 'lib' and
'bin' sections in the module.json file, the "source" directory
will be returned.
Note that currently modules may define only a single executable
binary or library to be built by the automatic build system, by
specifying `"bin": "dir-to-be-built-into-binary"`, or `"lib":
"dir-to-be-built-into-library"`, and the bin/lib will always have
the same name as the module. The default behaviour if nothing is
specified is for the 'source' directory to be built into a library.
The module.json syntax may allow for other combinations in the
future (and callers of this function should not rely on it
returning only a single item). For example, a "bin": {"dirname":
"exename"} syntax might be supported, however currently more
complex builds must be controlled by custom CMakeLists.
'''
if 'lib' in self.description:
return {os.path.normpath(self.description['lib']): self.getName()}
elif 'bin' not in self.description and not explicit_only:
return {'source': self.getName()}
else:
return {}
|
Return a dictionary of libraries to compile: {"dirname":"libname"},
this is used when automatically generating CMakeLists.
If explicit_only is not set, then in the absence of both 'lib' and
'bin' sections in the module.json file, the "source" directory
will be returned.
Note that currently modules may define only a single executable
binary or library to be built by the automatic build system, by
specifying `"bin": "dir-to-be-built-into-binary"`, or `"lib":
"dir-to-be-built-into-library"`, and the bin/lib will always have
the same name as the module. The default behaviour if nothing is
specified is for the 'source' directory to be built into a library.
The module.json syntax may allow for other combinations in the
future (and callers of this function should not rely on it
returning only a single item). For example, a "bin": {"dirname":
"exename"} syntax might be supported, however currently more
complex builds must be controlled by custom CMakeLists.
|
def object_from_json(self, object_type, object_json, parent=None):
"""
Given a blob of JSON representing a Zenpy object, recursively deserialize it and
any nested objects it contains. This method also adds the deserialized object
to the relevant cache if applicable.
"""
if not isinstance(object_json, dict):
return object_json
obj = self.instantiate_object(object_type, parent)
for key, value in object_json.items():
if key not in self.skip_attrs:
key, value = self._deserialize(key, obj, value)
if isinstance(value, dict):
value = ProxyDict(value, dirty_callback=getattr(
obj, '_dirty_callback', None))
elif isinstance(value, list):
value = ProxyList(value, dirty_callback=getattr(
obj, '_dirty_callback', None))
setattr(obj, key, value)
if hasattr(obj, '_clean_dirty'):
obj._clean_dirty()
self.api.cache.add(obj)
return obj
|
Given a blob of JSON representing a Zenpy object, recursively deserialize it and
any nested objects it contains. This method also adds the deserialized object
to the relevant cache if applicable.
|
def add_hydrogen(self, num):
"""Adds hydrogens
Args:
num (int): number of hydrogens
"""
self.H_count = num
if num > 0 and self.symbol in ("N", "O"):
self.H_donor = 1
else:
self.H_donor = 0
|
Adds hydrogens
Args:
num (int): number of hydrogens
|
def at(self, root):
"""Set root where PyJsonq start to prepare
:@param root
:@type root: string
:@return self
:@throws KeyError
"""
leafs = root.strip(" ").split('.')
for leaf in leafs:
if leaf:
self._json_data = self.__get_value_from_data(leaf, self._json_data)
return self
|
Set the root from which PyJsonq starts querying
:@param root
:@type root: string
:@return self
:@throws KeyError
|
def allow_cors(func):
"""This is a decorator which enable CORS for the specified endpoint."""
def wrapper(*args, **kwargs):
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = \
'PUT, GET, POST, DELETE, OPTIONS'
response.headers['Access-Control-Allow-Headers'] = \
'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'
return func(*args, **kwargs)
return wrapper
|
This is a decorator that enables CORS for the specified endpoint.
|
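The global response object suggests the Bottle framework; a usage sketch under that assumption (route path is hypothetical):

from bottle import route

@route('/api/status')
@allow_cors
def status():
    return {"ok": True}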
def set_daily(self, interval, **kwargs):
""" Set to repeat every x no. of days
:param int interval: no. of days to repeat at
:keyword date start: Start date of repetition (kwargs)
:keyword date end: End date of repetition (kwargs)
:keyword int occurrences: no of occurrences (kwargs)
"""
self._clear_pattern()
self.__interval = interval
self.set_range(**kwargs)
|
Set to repeat every x no. of days
:param int interval: no. of days to repeat at
:keyword date start: Start date of repetition (kwargs)
:keyword date end: End date of repetition (kwargs)
:keyword int occurrences: no of occurrences (kwargs)
|
def copy_current_websocket_context(func: Callable) -> Callable:
"""Share the current websocket context with the function decorated.
The websocket context is local per task and hence will not be
available in any other task. This decorator can be used to make
the context available:
.. code-block:: python
@copy_current_websocket_context
async def within_context() -> None:
method = websocket.method
...
"""
if not has_websocket_context():
raise RuntimeError('Attempt to copy websocket context outside of a websocket context')
websocket_context = _websocket_ctx_stack.top.copy()
@wraps(func)
async def wrapper(*args: Any, **kwargs: Any) -> Any:
async with websocket_context:
return await func(*args, **kwargs)
return wrapper
|
Share the current websocket context with the function decorated.
The websocket context is local per task and hence will not be
available in any other task. This decorator can be used to make
the context available:
.. code-block:: python
@copy_current_websocket_context
async def within_context() -> None:
method = websocket.method
...
|
def bgsave(host=None, port=None, db=None, password=None):
'''
Asynchronously save the dataset to disk
CLI Example:
.. code-block:: bash
salt '*' redis.bgsave
'''
server = _connect(host, port, db, password)
return server.bgsave()
|
Asynchronously save the dataset to disk
CLI Example:
.. code-block:: bash
salt '*' redis.bgsave
|
def live_processes(self):
"""Return a list of the live processes.
Returns:
A list of the live processes.
"""
result = []
for process_type, process_infos in self.all_processes.items():
for process_info in process_infos:
if process_info.process.poll() is None:
result.append((process_type, process_info.process))
return result
|
Return a list of the live processes.
Returns:
A list of the live processes.
|
def basename(path, extension_marker="."):
"""
:param str|None path: Path to consider
:param str|None extension_marker: Trim file extension based on specified character
:return str: Basename part of path, without extension (if 'extension_marker' provided)
"""
result = os.path.basename(path or "")
if extension_marker:
pre, _, post = result.rpartition(extension_marker)
return pre or post
return result
|
:param str|None path: Path to consider
:param str|None extension_marker: Trim file extension based on specified character
:return str: Basename part of path, without extension (if 'extension_marker' provided)
|
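Expected behavior, assuming basename() above is in scope:

print(basename("/tmp/archive.tar.gz"))  # 'archive.tar'
print(basename("README"))               # 'README' (no marker present)
print(basename(None))                   # ''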
def register_presence_callback(self, type_, from_, cb):
"""
Register a callback to be called when a presence stanza is received.
:param type_: Presence type to listen for.
:type type_: :class:`~.PresenceType`
:param from_: Sender JID to listen for, or :data:`None` for a wildcard
match.
:type from_: :class:`~aioxmpp.JID` or :data:`None`.
:param cb: Callback function
:raises ValueError: if another listener with the same ``(type_,
from_)`` pair is already registered
:raises ValueError: if `type_` is not a valid
:class:`~.PresenceType` (and cannot be cast
to a :class:`~.PresenceType`)
`cb` will be called whenever a presence stanza matching the `type_` is
received from the specified sender. `from_` may be :data:`None` to
indicate a wildcard. Like with :meth:`register_message_callback`, more
specific callbacks win over less specific callbacks. The fallback order
is identical, except that the ``type_=None`` entries described there do
not apply for presence stanzas and are thus omitted.
See :meth:`.SimpleStanzaDispatcher.register_callback` for the exact
wildcarding rules.
.. versionchanged:: 0.7
The `type_` argument is now supposed to be a
:class:`~.PresenceType` member.
.. deprecated:: 0.7
Passing a :class:`str` as `type_` argument is deprecated and will
raise a :class:`TypeError` as of the 1.0 release. See the Changelog
for :ref:`api-changelog-0.7` for further details on how to upgrade
your code efficiently.
.. deprecated:: 0.9
This method has been deprecated. It is recommended to use
:class:`aioxmpp.PresenceClient` instead.
"""
type_ = self._coerce_enum(type_, structs.PresenceType)
warnings.warn(
"register_presence_callback is deprecated; use "
"aioxmpp.dispatcher.SimplePresenceDispatcher or "
"aioxmpp.PresenceClient instead",
DeprecationWarning,
stacklevel=2
)
self._xxx_presence_dispatcher.register_callback(
type_,
from_,
cb,
)
|
Register a callback to be called when a presence stanza is received.
:param type_: Presence type to listen for.
:type type_: :class:`~.PresenceType`
:param from_: Sender JID to listen for, or :data:`None` for a wildcard
match.
:type from_: :class:`~aioxmpp.JID` or :data:`None`.
:param cb: Callback function
:raises ValueError: if another listener with the same ``(type_,
from_)`` pair is already registered
:raises ValueError: if `type_` is not a valid
:class:`~.PresenceType` (and cannot be cast
to a :class:`~.PresenceType`)
`cb` will be called whenever a presence stanza matching the `type_` is
received from the specified sender. `from_` may be :data:`None` to
indicate a wildcard. Like with :meth:`register_message_callback`, more
specific callbacks win over less specific callbacks. The fallback order
is identical, except that the ``type_=None`` entries described there do
not apply for presence stanzas and are thus omitted.
See :meth:`.SimpleStanzaDispatcher.register_callback` for the exact
wildcarding rules.
.. versionchanged:: 0.7
The `type_` argument is now supposed to be a
:class:`~.PresenceType` member.
.. deprecated:: 0.7
Passing a :class:`str` as `type_` argument is deprecated and will
raise a :class:`TypeError` as of the 1.0 release. See the Changelog
for :ref:`api-changelog-0.7` for further details on how to upgrade
your code efficiently.
.. deprecated:: 0.9
This method has been deprecated. It is recommended to use
:class:`aioxmpp.PresenceClient` instead.
|
def com_google_fonts_check_metadata_valid_name_values(style,
font_metadata,
font_familynames,
typographic_familynames):
"""METADATA.pb font.name field contains font name in right format?"""
from fontbakery.constants import RIBBI_STYLE_NAMES
if style in RIBBI_STYLE_NAMES:
familynames = font_familynames
else:
familynames = typographic_familynames
failed = False
for font_familyname in familynames:
if font_familyname not in font_metadata.name:
failed = True
yield FAIL, ("METADATA.pb font.name field (\"{}\")"
" does not match correct font name format (\"{}\")."
"").format(font_metadata.name,
font_familyname)
if not failed:
yield PASS, ("METADATA.pb font.name field contains"
" font name in right format.")
|
METADATA.pb font.name field contains font name in right format?
|
def default(self, obj):
"""Encode more types."""
if isinstance(obj, UUID):
return obj.hex
elif isinstance(obj, datetime.datetime):
return obj.isoformat()
return super().default(obj)
|
Encode more types.
|
def post_run(self, outline=False, dump=False, *args, **kwargs):
"""Any steps that need to be taken after running the action."""
hooks = self.context.config.post_build
handle_hooks(
"post_build",
hooks,
self.provider,
self.context,
dump,
outline
)
|
Any steps that need to be taken after running the action.
|
def list_instances_json(self, application=None, show_only_destroyed=False):
""" Get list of instances in json format converted to list"""
# todo: application should not be a parameter here. Application should do its own list; this is just for the sake of code reuse
q_filter = {'sortBy': 'byCreation', 'descending': 'true',
'mode': 'short',
'from': '0', 'to': '10000'}
if not show_only_destroyed:
q_filter['showDestroyed'] = 'false'
else:
q_filter['showDestroyed'] = 'true'
q_filter['showRunning'] = 'false'
q_filter['showError'] = 'false'
q_filter['showLaunching'] = 'false'
if application:
q_filter["applicationFilterId"] = application.applicationId
resp_json = self._router.get_instances(org_id=self.organizationId, params=q_filter).json()
if isinstance(resp_json, dict):
instances = [instance for g in resp_json['groups'] for instance in g['records']]
else: # TODO: This is compatibility fix for platform < 37.1
instances = resp_json
return instances
|
Get list of instances in json format converted to list
|
def list_domains(self):
"""Utility method to list all the domains in the jar."""
domains = []
for cookie in iter(self):
if cookie.domain not in domains:
domains.append(cookie.domain)
return domains
|
Utility method to list all the domains in the jar.
|
def unpack_from(self, data, offset=0):
"""See :func:`~bitstruct.unpack_from_dict()`.
"""
return {info.name: v for info, v in self.unpack_from_any(data, offset)}
|
See :func:`~bitstruct.unpack_from_dict()`.
|
def setMinimumPixmapSize(self, size):
"""
Sets the minimum pixmap size that will be displayed to the user
for the dock widget.
:param size | <int>
"""
self._minimumPixmapSize = size
position = self.position()
self._position = None
self.setPosition(position)
|
Sets the minimum pixmap size that will be displayed to the user
for the dock widget.
:param size | <int>
|
def parse_end_date(self, request, start_date):
"""
Return period in days after the start date to show event occurrences,
which is one of the following in order of priority:
- `end_date` GET parameter value, if given and valid. The filtering
will be *inclusive* of the end date: until end-of-day of this date
- `days_to_show` GET parameter value, if given and valid
- page's `default_days_to_show` if set
- the value of the app setting `DEFAULT_DAYS_TO_SHOW`
"""
if request.GET.get('end_date'):
try:
return djtz.parse('%s 00:00' % request.GET.get('end_date'))
except ValueError:
pass
days_to_show = self.default_days_to_show or \
appsettings.DEFAULT_DAYS_TO_SHOW
if 'days_to_show' in request.GET:
try:
days_to_show = int(request.GET.get('days_to_show'))
except ValueError:
pass
return start_date + timedelta(days=days_to_show)
|
Return period in days after the start date to show event occurrences,
which is one of the following in order of priority:
- `end_date` GET parameter value, if given and valid. The filtering
will be *inclusive* of the end date: until end-of-day of this date
- `days_to_show` GET parameter value, if given and valid
- page's `default_days_to_show` if set
- the value of the app setting `DEFAULT_DAYS_TO_SHOW`
|
def CreateKey(self, prikey=None):
"""
Create a KeyPair and store it encrypted in the database.
Args:
prikey (iterable_of_ints): (optional) 32 byte private key.
Returns:
KeyPair: a KeyPair instance.
"""
account = super(UserWallet, self).CreateKey(private_key=prikey)
self.OnCreateAccount(account)
contract = WalletContract.CreateSignatureContract(account.PublicKey)
self.AddContract(contract)
return account
|
Create a KeyPair and store it encrypted in the database.
Args:
prikey (iterable_of_ints): (optional) 32 byte private key.
Returns:
KeyPair: a KeyPair instance.
|
def read_magic_file(self, path, sort_by_this_name, sort_by_file_type=False):
"""
read a magic-formatted tab-delimited file.
return a dictionary of dictionaries, with this format:
{'Z35.5a': {'specimen_weight': '1.000e-03', 'er_citation_names': 'This study', 'specimen_volume': '', 'er_location_name': '', 'er_site_name': 'Z35.', 'er_sample_name': 'Z35.5', 'specimen_class': '', 'er_specimen_name': 'Z35.5a', 'specimen_lithology': '', 'specimen_type': ''}, ....}
"""
DATA = {}
with open(path, 'r') as fin:
lines = list(fin.readlines())
first_line = lines[0]
if not first_line:
return False, None, 'empty_file'
if first_line[0] == "s" or first_line[1] == "s":
delim = ' '
elif first_line[0] == "t" or first_line[1] == "t":
delim = '\t'
else:
print('-W- error reading ', path)
return False, None, 'bad_file'
file_type = first_line.strip('\n').split(delim)[1]
if sort_by_file_type:
item_type = file_type.split('_')[1][:-1]
if item_type == 'age':
sort_by_this_name = "by_line_number"
else:
sort_by_this_name = 'er_' + item_type + '_name'
line = lines[1]
header = line.strip('\n').split(delim)
counter = 0
for line in lines[2:]:
tmp_data = {}
tmp_line = line.strip('\n').split(delim)
for i in range(len(header)):
if i < len(tmp_line):
tmp_data[header[i]] = tmp_line[i].strip()
else:
tmp_data[header[i]] = ""
if sort_by_this_name == "by_line_number":
DATA[counter] = tmp_data
counter += 1
else:
if tmp_data[sort_by_this_name] != "":
DATA[tmp_data[sort_by_this_name]] = tmp_data
return DATA, header, file_type
|
read a magic-formatted tab-delimited file.
return a dictionary of dictionaries, with this format:
{'Z35.5a': {'specimen_weight': '1.000e-03', 'er_citation_names': 'This study', 'specimen_volume': '', 'er_location_name': '', 'er_site_name': 'Z35.', 'er_sample_name': 'Z35.5', 'specimen_class': '', 'er_specimen_name': 'Z35.5a', 'specimen_lithology': '', 'specimen_type': ''}, ....}
|
def _calc_rms(mol1, mol2, clabel1, clabel2):
"""
Calculate the RMSD.
Args:
mol1: The first molecule. OpenBabel OBMol or pymatgen Molecule
object
mol2: The second molecule. OpenBabel OBMol or pymatgen Molecule
object
clabel1: The atom indices that can reorder the first molecule to
uniform atom order
clabel2: The atom indices that can reorder the second molecule to
uniform atom order
Returns:
The RMSD.
"""
obmol1 = BabelMolAdaptor(mol1).openbabel_mol
obmol2 = BabelMolAdaptor(mol2).openbabel_mol
cmol1 = ob.OBMol()
for i in clabel1:
oa1 = obmol1.GetAtom(i)
a1 = cmol1.NewAtom()
a1.SetAtomicNum(oa1.GetAtomicNum())
a1.SetVector(oa1.GetVector())
cmol2 = ob.OBMol()
for i in clabel2:
oa2 = obmol2.GetAtom(i)
a2 = cmol2.NewAtom()
a2.SetAtomicNum(oa2.GetAtomicNum())
a2.SetVector(oa2.GetVector())
aligner = ob.OBAlign(True, False)
aligner.SetRefMol(cmol1)
aligner.SetTargetMol(cmol2)
aligner.Align()
return aligner.GetRMSD()
|
Calculate the RMSD.
Args:
mol1: The first molecule. OpenBabel OBMol or pymatgen Molecule
object
mol2: The second molecule. OpenBabel OBMol or pymatgen Molecule
object
clabel1: The atom indices that can reorder the first molecule to
uniform atom order
clabel2: The atom indices that can reorder the second molecule to
uniform atom order
Returns:
The RMSD.
|
async def dump_tuple(self, elem, elem_type, params=None, obj=None):
"""
Dumps tuple of elements to the writer.
:param elem:
:param elem_type:
:param params:
:param obj:
:return:
"""
if len(elem) != len(elem_type.f_specs()):
raise ValueError('Fixed-size tuple length %s does not match the defined size %s' % (len(elem), len(elem_type.f_specs())))
elem_fields = params[0] if params else None
if elem_fields is None:
elem_fields = elem_type.f_specs()
obj = [] if obj is None else x.get_elem(obj)
for idx, elem in enumerate(elem):
try:
self.tracker.push_index(idx)
fvalue = await self._dump_field(elem, elem_fields[idx], params[1:] if params else None)
obj.append(fvalue)
self.tracker.pop()
except Exception as e:
raise helpers.ArchiveException(e, tracker=self.tracker) from e
return obj
|
Dumps tuple of elements to the writer.
:param elem:
:param elem_type:
:param params:
:param obj:
:return:
|
def cross_entropy_loss(logits, one_hot_labels, label_smoothing=0,
weight=1.0, scope=None):
"""Define a Cross Entropy loss using softmax_cross_entropy_with_logits.
It can scale the loss by weight factor, and smooth the labels.
Args:
logits: [batch_size, num_classes] logits outputs of the network .
one_hot_labels: [batch_size, num_classes] target one_hot_encoded labels.
label_smoothing: if greater than 0 then smooth the labels.
weight: scale the loss by this factor.
scope: Optional scope for name_scope.
Returns:
A tensor with the softmax_cross_entropy loss.
"""
logits.get_shape().assert_is_compatible_with(one_hot_labels.get_shape())
with tf.name_scope(scope, 'CrossEntropyLoss', [logits, one_hot_labels]):
num_classes = one_hot_labels.get_shape()[-1].value
one_hot_labels = tf.cast(one_hot_labels, logits.dtype)
if label_smoothing > 0:
smooth_positives = 1.0 - label_smoothing
smooth_negatives = label_smoothing / num_classes
one_hot_labels = one_hot_labels * smooth_positives + smooth_negatives
cross_entropy = tf.contrib.nn.deprecated_flipped_softmax_cross_entropy_with_logits(
logits, one_hot_labels, name='xentropy')
weight = tf.convert_to_tensor(weight,
dtype=logits.dtype.base_dtype,
name='loss_weight')
loss = tf.multiply(weight, tf.reduce_mean(cross_entropy), name='value')
tf.add_to_collection(LOSSES_COLLECTION, loss)
return loss
|
Define a Cross Entropy loss using softmax_cross_entropy_with_logits.
It can scale the loss by weight factor, and smooth the labels.
Args:
logits: [batch_size, num_classes] logits outputs of the network .
one_hot_labels: [batch_size, num_classes] target one_hot_encoded labels.
label_smoothing: if greater than 0 then smooth the labels.
weight: scale the loss by this factor.
scope: Optional scope for name_scope.
Returns:
A tensor with the softmax_cross_entropy loss.
|
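A numeric check of the label-smoothing step in isolation:

import numpy as np

s, K = 0.1, 4
one_hot = np.array([0., 0., 1., 0.])
smoothed = one_hot * (1.0 - s) + s / K
print(smoothed)  # [0.025 0.025 0.925 0.025]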
def getSimpleFileData(self, fileInfo, data):
"""Function to initialize the simple data for file info"""
result = fileInfo[fileInfo.find(data + "</td>"):]
result = result[:result.find("</A></td>")]
result = result[result.rfind(">") + 1:]
return result
|
Function to initialize the simple data for file info
|
def transform_to_matrices(transform):
"""
Convert an SVG transform string to an array of matrices.
> transform = "rotate(-10 50 100)
translate(-36 45.5)
skewX(40)
scale(1 0.5)"
Parameters
-----------
transform : str
Contains transformation information in SVG form
Returns
-----------
matrices : (n, 3, 3) float
Multiple transformation matrices from input transform string
"""
# split the transform string in to components of:
# (operation, args) i.e. (translate, '-1.0, 2.0')
components = [
[j.strip() for j in i.strip().split('(') if len(j) > 0]
for i in transform.lower().split(')') if len(i) > 0]
# store each matrix without dotting
matrices = []
for line in components:
if len(line) == 0:
continue
elif len(line) != 2:
raise ValueError('should always have two components!')
key, args = line
# convert string args to array of floats
# support either comma or space delimiter
values = np.array([float(i) for i in
args.replace(',', ' ').split()])
if key == 'translate':
# convert translation to a (3, 3) homogenous matrix
matrices.append(np.eye(3))
matrices[-1][:2, 2] = values
elif key == 'matrix':
# [a b c d e f] ->
# [[a c e],
# [b d f],
# [0 0 1]]
matrices.append(np.vstack((
values.reshape((3, 2)).T, [0, 0, 1])))
elif key == 'rotate':
# SVG rotations are specified in degrees; convert to
# radians before building the rotation matrix
angle = np.radians(values[0])
# if there are three values rotate around point
if len(values) == 3:
point = values[1:]
else:
point = None
matrices.append(planar_matrix(theta=angle,
point=point))
elif key == 'scale':
# supports (x_scale, y_scale) or (scale)
mat = np.eye(3)
mat[:2, :2] *= values
matrices.append(mat)
else:
log.warning('unknown SVG transform: {}'.format(key))
return matrices
|
Convert an SVG transform string to an array of matrices.
> transform = "rotate(-10 50 100)
translate(-36 45.5)
skewX(40)
scale(1 0.5)"
Parameters
-----------
transform : str
Contains transformation information in SVG form
Returns
-----------
matrices : (n, 3, 3) float
Multiple transformation matrices from input transform string
|
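A standalone check of the string-splitting logic, restricted to translate and scale (planar_matrix is external to this snippet; the function name here is illustrative):

import numpy as np

def svg_transform_sketch(transform):
    components = [
        [j.strip() for j in i.strip().split('(') if len(j) > 0]
        for i in transform.lower().split(')') if len(i) > 0]
    matrices = []
    for key, args in components:
        values = np.array([float(v) for v in args.replace(',', ' ').split()])
        mat = np.eye(3)
        if key == 'translate':
            mat[:2, 2] = values      # homogeneous 2D translation
        elif key == 'scale':
            mat[:2, :2] *= values    # (x_scale, y_scale) or uniform scale
        matrices.append(mat)
    return matrices

print(svg_transform_sketch("translate(-36 45.5) scale(1 0.5)")[0])
# [[  1.    0.  -36. ]
#  [  0.    1.   45.5]
#  [  0.    0.    1. ]]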
def rpc(self, address, rpc_id):
"""Call an RPC and receive the result as an integer.
If the RPC does not properly return a 32 bit integer, raise a warning
unless it cannot be converted into an integer at all, in which case
a HardwareError is thrown.
Args:
address (int): The address of the tile we want to call the RPC
on
rpc_id (int): The id of the RPC that we want to call
Returns:
int: The result of the RPC call. If the rpc did not succeed
an error is thrown instead.
"""
# Always allow mocking an RPC to override whatever the default behavior is
if address in self.mock_rpcs and rpc_id in self.mock_rpcs[address]:
value = self.mock_rpcs[address][rpc_id]
return value
result = self._call_rpc(address, rpc_id, bytes())
if len(result) != 4:
self.warn(u"RPC 0x%X on address %d: response had invalid length %d not equal to 4" % (rpc_id, address, len(result)))
if len(result) < 4:
raise HardwareError("Response from RPC was not long enough to parse as an integer", rpc_id=rpc_id, address=address, response_length=len(result))
if len(result) > 4:
result = result[:4]
res, = struct.unpack("<L", result)
return res
|
Call an RPC and receive the result as an integer.
If the RPC does not properly return a 32 bit integer, raise a warning
unless it cannot be converted into an integer at all, in which case
a HardwareError is thrown.
Args:
address (int): The address of the tile we want to call the RPC
on
rpc_id (int): The id of the RPC that we want to call
Returns:
int: The result of the RPC call. If the rpc did not succeed
an error is thrown instead.
|
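The integer decoding in isolation, with a hypothetical 5-byte payload:

import struct

result = b'\x2a\x00\x00\x00\xff'  # only the first 4 bytes are used
res, = struct.unpack("<L", result[:4])
print(res)  # 42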
def build(self, grad_list, get_opt_fn):
"""
Args:
grad_list ([[(grad, var), ...], ...]): #GPU lists to be reduced. Each is the gradients computed on each GPU.
get_opt_fn (-> tf.train.Optimizer): callable which returns an optimizer
Returns:
tf.Operation: the training op
"""
assert len(grad_list) == len(self.towers)
DataParallelBuilder._check_grad_list(grad_list)
if self._scale_gradient and len(self.towers) > 1:
# pretend to average the grads, in order to make async and
# sync have consistent effective learning rate
gradproc = ScaleGradient(('.*', 1.0 / len(self.towers)), verbose=False)
grad_list = [gradproc.process(gv) for gv in grad_list]
# Ngpu x Nvar x 2
train_ops = []
opt = get_opt_fn()
with tf.name_scope('async_apply_gradients'):
for i, grad_and_vars in enumerate(zip(*grad_list)):
# Ngpu x 2
v = grad_and_vars[0][1]
with tf.device(v.device):
# will call apply_gradients (therefore gradproc) multiple times
train_ops.append(opt.apply_gradients(
grad_and_vars, name='apply_grad_{}'.format(i)))
return tf.group(*train_ops, name='train_op')
|
Args:
grad_list ([[(grad, var), ...], ...]): #GPU lists to be reduced. Each is the gradients computed on each GPU.
get_opt_fn (-> tf.train.Optimizer): callable which returns an optimizer
Returns:
tf.Operation: the training op
|
def check_valid_temperature(var, units):
r"""Check that variable is air temperature."""
check_valid(var, 'standard_name', 'air_temperature')
check_valid(var, 'units', units)
assert_daily(var)
|
r"""Check that variable is air temperature.
|
def VerifyStructure(self, parser_mediator, line):
"""Verify that this file is a Mac AppFirewall log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
line (str): line from a text file.
Returns:
bool: True if the line is in the expected format, False if not.
"""
self._last_month = 0
self._year_use = parser_mediator.GetEstimatedYear()
try:
structure = self.FIREWALL_LINE.parseString(line)
except pyparsing.ParseException as exception:
logger.debug((
'Unable to parse file as a Mac AppFirewall log file with error: '
'{0!s}').format(exception))
return False
if structure.action != 'creating /var/log/appfirewall.log':
logger.debug(
'Not a Mac AppFirewall log file, invalid action: {0!s}'.format(
structure.action))
return False
if structure.status != 'Error':
logger.debug(
'Not a Mac AppFirewall log file, invalid status: {0!s}'.format(
structure.status))
return False
time_elements_tuple = self._GetTimeElementsTuple(structure)
try:
dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
except ValueError:
logger.debug((
'Not a Mac AppFirewall log file, invalid date and time: '
'{0!s}').format(structure.date_time))
return False
self._last_month = time_elements_tuple[1]
return True
|
Verify that this file is a Mac AppFirewall log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
line (str): line from a text file.
Returns:
bool: True if the line is in the expected format, False if not.
|
def to_text(self, filename=None, overwrite=True):
'''
Write this catalog out to a text file.
'''
table = self.standardized
#table = hstack([self.identifiers,
# self._coordinate_table(),
# self.magnitudes,
# self.errors])
if filename is None:
filename = '{}.txt'.format(self.name)
self.speak('saving to {}'.format(filename))
table.write(filename, format='ascii.ecsv', overwrite=overwrite)
|
Write this catalog out to a text file.
|
def _get_args(self, executable, *args):
"""compile all the executable and the arguments, combining with common arguments
to create a full batch of command args"""
args = list(args)
args.insert(0, executable)
if self.username:
args.append("--username={}".format(self.username))
if self.host:
args.append("--host={}".format(self.host))
if self.port:
args.append("--port={}".format(self.port))
args.append(self.dbname)
#args.extend(other_args)
return args
|
compile all the executable and the arguments, combining with common arguments
to create a full batch of command args
|
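A minimal stand-in showing the assembled argument list (class name and values here are hypothetical, e.g. wrapping a pg_dump-style tool):

class ArgsSketch:
    def __init__(self, username=None, host=None, port=None, dbname=""):
        self.username = username
        self.host = host
        self.port = port
        self.dbname = dbname

    def get_args(self, executable, *args):
        args = [executable, *args]
        if self.username:
            args.append("--username={}".format(self.username))
        if self.host:
            args.append("--host={}".format(self.host))
        if self.port:
            args.append("--port={}".format(self.port))
        args.append(self.dbname)
        return args

print(ArgsSketch("alice", "db.local", 5432, "mydb").get_args("pg_dump"))
# ['pg_dump', '--username=alice', '--host=db.local', '--port=5432', 'mydb']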
def generate(self):
"""Returns (ts, rvs), where ts is a list of arrays of
observation times (one array for each observatory), and rvs is
a corresponding list of total radial velocity measurements."""
ts = self.generate_tobs()
noise = self.generate_noise(ts)
rvs = []
for t, n in zip(ts, noise):
rvs.append(n + np.sum(rv.rv_model(t, self.params), axis=0))
return ts, rvs
|
Returns (ts, rvs), where ts is a list of arrays of
observation times (one array for each observatory), and rvs is
a corresponding list of total radial velocity measurements.
|
def GetServices(self,filename):
"""Returns a list of service objects handling this file type"""
objlist = []
for sobj in self.services:
if sobj.KnowsFile(filename):
objlist.append(sobj)
if len(objlist) == 0:
return None
return objlist
|
Returns a list of service objects handling this file type
|
def GetPathInfo(self, timestamp=None):
"""Generates a summary about the path record.
Args:
timestamp: A point in time from which the data should be retrieved.
Returns:
A `rdf_objects.PathInfo` instance.
"""
path_info_timestamp = self._LastEntryTimestamp(self._path_infos, timestamp)
try:
result = self._path_infos[path_info_timestamp].Copy()
except KeyError:
result = rdf_objects.PathInfo(
path_type=self._path_type, components=self._components)
stat_entry_timestamp = self._LastEntryTimestamp(self._stat_entries,
timestamp)
result.last_stat_entry_timestamp = stat_entry_timestamp
result.stat_entry = self._stat_entries.get(stat_entry_timestamp)
hash_entry_timestamp = self._LastEntryTimestamp(self._hash_entries,
timestamp)
result.last_hash_entry_timestamp = hash_entry_timestamp
result.hash_entry = self._hash_entries.get(hash_entry_timestamp)
return result
|
Generates a summary about the path record.
Args:
timestamp: A point in time from which the data should be retrieved.
Returns:
A `rdf_objects.PathInfo` instance.
|
def asynloop(self, auto_connect=False, timeout=10, detached_delay=0.2):
"""
Non-blocking event loop consuming messages until connection is lost,
or shutdown is requested.
:param int timeout: number of secs for asyncore timeout
:param float detached_delay: float secs to sleep when exiting asyncore loop and execution detached queue
callbacks
"""
if auto_connect:
self.broadcastEvent(YowLayerEvent(YowNetworkLayer.EVENT_STATE_CONNECT))
try:
self.listening = True
start = int(time.time())
while True:
asyncore.loop(timeout)
time.sleep(detached_delay)
try:
# Execute from detached queue callback
callback = self.detached_queue.get(False)
callback()
except Queue.Empty:
pass
if int(time.time()) - start > timeout:
logger.info("Asynloop : Timeout")
# defensive code; we should already be disconnected
if self.facade.connected():
self.broadcastEvent(YowLayerEvent(YowNetworkLayer.EVENT_STATE_DISCONNECT))
break
self.cleanup()
except AuthError as e:
self.cleanup()
raise exceptions.AuthenticationError("Authentication Error: {0}".format(e))
except:
self.cleanup()
exc_info = sys.exc_info()
traceback.print_exception(*exc_info)
raise exceptions.UnexpectedError(str(exc_info[0]))
|
Non-blocking event loop consuming messages until connection is lost,
or shutdown is requested.
:param int timeout: number of secs for asyncore timeout
:param float detached_delay: float secs to sleep when exiting asyncore loop and execution detached queue
callbacks
|
def system_types():
'''
List the system types that are supported by the installed version of sfdisk
CLI Example:
.. code-block:: bash
salt '*' partition.system_types
'''
ret = {}
for line in __salt__['cmd.run']('sfdisk -T').splitlines():
if not line:
continue
if line.startswith('Id'):
continue
comps = line.strip().split()
ret[comps[0]] = comps[1]
return ret
|
List the system types that are supported by the installed version of sfdisk
CLI Example:
.. code-block:: bash
salt '*' partition.system_types
|
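The parsing step in isolation, fed hypothetical sfdisk -T output instead of running the command (note that comps[1] keeps only the first word of each type name):

sample = "Id  Name\n\n 83  Linux\n 82  Linux swap / Solaris\n"
ret = {}
for line in sample.splitlines():
    if not line:
        continue
    if line.startswith('Id'):
        continue
    comps = line.strip().split()
    ret[comps[0]] = comps[1]
print(ret)  # {'83': 'Linux', '82': 'Linux'}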
def _to_numpy(Z):
"""Converts a None, list, np.ndarray, or torch.Tensor to np.ndarray;
also handles converting sparse input to dense."""
if Z is None:
return Z
elif issparse(Z):
return Z.toarray()
elif isinstance(Z, np.ndarray):
return Z
elif isinstance(Z, list):
return np.array(Z)
elif isinstance(Z, torch.Tensor):
return Z.cpu().numpy()
else:
msg = (
f"Expected None, list, numpy.ndarray or torch.Tensor, "
f"got {type(Z)} instead."
)
raise Exception(msg)
|
Converts a None, list, np.ndarray, or torch.Tensor to np.ndarray;
also handles converting sparse input to dense.
|
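A quick check across the accepted input types (requires torch and scipy; assumes _to_numpy is in scope):

import numpy as np
import torch
from scipy.sparse import csr_matrix

print(_to_numpy(None))                      # None
print(_to_numpy([1, 2, 3]))                 # array([1, 2, 3])
print(_to_numpy(torch.tensor([1., 2.])))    # array([1., 2.], dtype=float32)
print(_to_numpy(csr_matrix(np.eye(2))))     # dense 2x2 identity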