code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def check_exclamations_ppm(text):
    """Flag text whose exclamation-mark rate exceeds 30 parts per million."""
    err = "leonard.exclamation.30ppm"
    msg = u"More than 30 ppm of exclamations. Keep them under control."
    regex = r"\w!"
    # An exclamation counts only when it directly follows a word character.
    hits = len(re.findall(regex, text))
    word_total = len(text.split(" "))
    parts_per_million = hits / word_total * 1e6
    # Single exclamations are tolerated regardless of document length.
    if parts_per_million <= 30 or hits <= 1:
        return []
    start = re.search(regex, text).start() + 1
    return [(start, start + 1, err, msg, ".")]
|
Make sure that the exclamation ppm is under 30.
|
def sign_more(self, bucket, cos_path, expired):
    """Create a multi-use signature (for uploading files, creating
    directories, fetching file/directory attributes, and listing
    directory contents).

    :param bucket: bucket name
    :param cos_path: the COS path to operate on, starting with '/'
    :param expired: signature expiry time as a UNIX timestamp; e.g. to let
        the signature expire in 30 seconds, set this to now + 30
    :return: the signature string
    """
    return self.app_sign(bucket, cos_path, expired)
|
多次签名(针对上传文件,创建目录, 获取文件目录属性, 拉取目录列表)
:param bucket: bucket名称
:param cos_path: 要操作的cos路径, 以'/'开始
:param expired: 签名过期时间, UNIX时间戳, 如想让签名在30秒后过期, 即可将expired设成当前时间加上30秒
:return: 签名字符串
|
def recommend(self, client_data, limit, extra_data=None):
    """
    Hybrid recommendations simply select half recommendations from
    the ensemble recommender, and half from the curated one.

    Duplicate recommendations are accommodated by rank ordering
    by weight.

    :param client_data: dict describing the client; must contain
        "client_id", may contain "installed_addons".
    :param limit: maximum number of recommendations to return.
    :param extra_data: optional dict forwarded to the composite
        recommenders (defaults to an empty dict).
    :return: list of (guid, weight) tuples sorted by weight descending,
        or [] when fewer than ``limit`` merged results were found.
    """
    # BUG FIX: `extra_data={}` was a mutable default argument shared
    # across calls; default to None and create a fresh dict per call.
    if extra_data is None:
        extra_data = {}
    preinstalled_addon_ids = client_data.get("installed_addons", [])
    # Compute an extended limit by adding the length of
    # the list of any preinstalled addons.
    extended_limit = limit + len(preinstalled_addon_ids)
    ensemble_suggestions = self._ensemble_recommender.recommend(
        client_data, extended_limit, extra_data
    )
    curated_suggestions = self._curated_recommender.recommend(
        client_data, extended_limit, extra_data
    )
    # Generate a set of results from each of the composite
    # recommenders. We select one item from each recommender
    # sequentially so that we do not bias one recommender over the
    # other.  Track already-seen guids in a set for O(1) duplicate
    # checks instead of rescanning the merged results each time.
    merged_results = set()
    seen_guids = set()
    while (
        len(merged_results) < limit
        and len(ensemble_suggestions) > 0
        and len(curated_suggestions) > 0
    ):
        r1 = ensemble_suggestions.pop()
        if r1[0] not in seen_guids:
            merged_results.add(r1)
            seen_guids.add(r1[0])
        # Terminate early if we have an odd number for the limit
        if not (
            len(merged_results) < limit
            and len(ensemble_suggestions) > 0
            and len(curated_suggestions) > 0
        ):
            break
        r2 = curated_suggestions.pop()
        if r2[0] not in seen_guids:
            merged_results.add(r2)
            seen_guids.add(r2[0])
    if len(merged_results) < limit:
        msg = (
            "Defaulting to empty results. Insufficient recommendations found for client: %s"
            % client_data["client_id"]
        )
        self.logger.info(msg)
        return []
    sorted_results = sorted(
        list(merged_results), key=op.itemgetter(1), reverse=True
    )
    log_data = (client_data["client_id"], str([r[0] for r in sorted_results]))
    self.logger.info(
        "Hybrid recommendations client_id: [%s], guids: [%s]" % log_data
    )
    return sorted_results
|
Hybrid recommendations simply select half recommendations from
the ensemble recommender, and half from the curated one.
Duplicate recommendations are accommodated by rank ordering
by weight.
|
def populate(self, priority, address, rtr, data):
    """Decode a received message payload into channel and delay time.

    :param priority: message priority; must be high priority
    :param address: module address
    :param rtr: RTR flag; must be unset
    :param data: 4-byte payload; data[0] carries the channel bits and
        data[1:] the 3-byte delay time
    :return: None
    """
    assert isinstance(data, bytes)
    self.needs_high_priority(priority)
    self.needs_no_rtr(rtr)
    self.needs_data(data, 4)
    self.set_attributes(priority, address, rtr)
    # 00000011 = channel 1
    # 00001100 = channel 2
    # so shift 1 bit to the right + and with 03
    tmp = (data[0] >> 1) & 0x03
    # BUG FIX: removed leftover debug `print(tmp)` that polluted stdout.
    self.channel = self.byte_to_channel(tmp)
    self.needs_valid_channel(self.channel, 2)
    # The delay time is 3 bytes big-endian; prepend a zero byte so it can
    # be unpacked as an unsigned 32-bit integer.
    (self.delay_time,) = struct.unpack('>L', bytes([0]) + data[1:])
|
:return: None
|
def _add_video_timing(self, pic):
    """Add a `p:video` element under `p:sld/p:timing`.

    The element will refer to the specified *pic* element by its shape
    id, and cause the video play controls to appear for that video.
    """
    slide_elm = self._spTree.xpath('/p:sld')[0]
    timing_children = slide_elm.get_or_add_childTnLst()
    timing_children.add_video(pic.shape_id)
|
Add a `p:video` element under `p:sld/p:timing`.
The element will refer to the specified *pic* element by its shape
id, and cause the video play controls to appear for that video.
|
def new_evaluation_result(self, has_improved: bool) -> bool:
    """
    Record one validation result, lowering the learning rate when the model
    has stagnated for long enough.

    :param has_improved: Whether the model improved on held-out validation data.
    :return: True if parameters should be reset to the ones with best validation score.
    """
    if self.lr is None:
        # First evaluation: initialize from the configured base rate.
        assert self.base_lr is not None
        self.lr = self.base_lr

    self.num_not_improved = 0 if has_improved else self.num_not_improved + 1

    stagnated = (self.num_not_improved >= self.reduce_num_not_improved
                 and self.reduce_factor < 1.0
                 and self.warmed_up)
    if not stagnated:
        return False

    previous_lr = self.lr
    self.lr *= self.reduce_factor
    logger.info("%d checkpoints since improvement or rate scaling, "
                "lowering learning rate: %1.2e -> %1.2e", self.num_not_improved, previous_lr, self.lr)
    self.num_not_improved = 0
    return True
|
Returns true if the parameters should be reset to the ones with the best validation score.
:param has_improved: Whether the model improved on held-out validation data.
:return: True if parameters should be reset to the ones with best validation score.
|
def getFlaskResponse(responseString, httpStatus=200):
    """
    Returns a Flask response object for the specified data and HTTP status.

    :param responseString: the already-serialized response body
    :param httpStatus: HTTP status code to send (default 200)
    :return: a flask.Response using the module-level MIMETYPE
    """
    return flask.Response(responseString, status=httpStatus, mimetype=MIMETYPE)
|
Returns a Flask response object for the specified data and HTTP status.
|
def which(program, ignore_own_venv=False):
    """
    :param str|None program: Program name to find via env var PATH
    :param bool ignore_own_venv: If True, do not resolve to executables in current venv
    :return str|None: Full path to program, if one exists and is executable
    """
    if not program:
        return None
    if os.path.isabs(program):
        return program if is_executable(program) else None
    # BUG FIX: split PATH on os.pathsep (":" on POSIX, ";" on Windows)
    # instead of a hard-coded ":" so lookups work on every platform.
    for p in os.environ.get("PATH", "").split(os.pathsep):
        fp = os.path.join(p, program)
        if (not ignore_own_venv or not fp.startswith(sys.prefix)) and is_executable(fp):
            return fp
    return None
|
:param str|None program: Program name to find via env var PATH
:param bool ignore_own_venv: If True, do not resolve to executables in current venv
:return str|None: Full path to program, if one exists and is executable
|
def trim(self, lower=None, upper=None):
    """Trim values in accordance with
    :math:`EQI2 \\leq EQI1 \\leq EQB`.

    >>> from hydpy.models.lland import *
    >>> parameterstep('1d')
    >>> eqb.value = 3.0
    >>> eqi2.value = 1.0
    >>> eqi1(0.0)
    >>> eqi1
    eqi1(1.0)
    >>> eqi1(1.0)
    >>> eqi1
    eqi1(1.0)
    >>> eqi1(2.0)
    >>> eqi1
    eqi1(2.0)
    >>> eqi1(3.0)
    >>> eqi1
    eqi1(3.0)
    >>> eqi1(4.0)
    >>> eqi1
    eqi1(3.0)
    """
    # Default the bounds to the related parameters when not given:
    # the lower bound comes from EQI2, the upper bound from EQB.
    # getattr with a None fallback handles parameters without a value yet.
    if lower is None:
        lower = getattr(self.subpars.eqi2, 'value', None)
    if upper is None:
        upper = getattr(self.subpars.eqb, 'value', None)
    super().trim(lower, upper)
|
Trim upper values in accordance with
:math:`EQI2 \\leq EQI1 \\leq EQB`.
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> eqb.value = 3.0
>>> eqi2.value = 1.0
>>> eqi1(0.0)
>>> eqi1
eqi1(1.0)
>>> eqi1(1.0)
>>> eqi1
eqi1(1.0)
>>> eqi1(2.0)
>>> eqi1
eqi1(2.0)
>>> eqi1(3.0)
>>> eqi1
eqi1(3.0)
>>> eqi1(4.0)
>>> eqi1
eqi1(3.0)
|
def _build_biomart_gene_query(self, taxid, cols_to_fetch):
"""
Building url to fetch equivalent identifiers via Biomart Restful API.
Documentation at
http://uswest.ensembl.org/info/data/biomart/biomart_restful.html
:param taxid:
:param array of ensembl biomart attributes to include
:return:
"""
taxid = str(taxid)
# basic stuff for ensembl ids.
if taxid != '9606': # drop hgnc column
cols_to_fetch = [x for x in cols_to_fetch if x != 'hgnc_id']
# LOG.info('Build BMQ with taxon %s and mapping %s', taxid, self.localtt)
query_attributes = {
"virtualSchemaName": "default", "formatter": "TSV", "header": "0",
"uniqueRows": "1", "count": "0", "datasetConfigVersion": "0.6"}
qry = etree.Element("Query", query_attributes)
if taxid in self.localtt:
object_attributes = {"name": self.localtt[taxid], "interface": "default"}
dataset = etree.SubElement(qry, "Dataset", object_attributes)
for col in cols_to_fetch:
etree.SubElement(dataset, "Attribute", {"name": col})
# is indent right?
query = '<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE Query>' \
+ etree.tostring(qry, encoding="unicode")
else:
LOG.warning("not finding taxon %s in the local translation table", taxid)
query = None
return query
|
Building url to fetch equivalent identifiers via Biomart Restful API.
Documentation at
http://uswest.ensembl.org/info/data/biomart/biomart_restful.html
:param taxid:
:param array of ensembl biomart attributes to include
:return:
|
def parse_command_line_parameters():
    """Parse command line arguments for the database-build script."""
    parser = OptionParser(usage='usage: %prog [options] fasta_filepath',
                          version='Version: %prog 0.1')
    # A binary 'is_protein' flag.
    parser.add_option('-p', '--is_protein', action='store_true',
                      dest='is_protein', default=False,
                      help='Pass if building db of protein sequences [default:'
                      ' False, nucleotide db]')
    parser.add_option('-o', '--output_dir', action='store', type='string',
                      dest='output_dir', default=None,
                      help='the output directory [default: directory '
                      'containing input fasta_filepath]')
    opts, args = parser.parse_args()
    # Exactly one positional argument (the fasta filepath) is required.
    if len(args) != 1:
        parser.error('Must provide single filepath to build database from.')
    return opts, args
|
Parses command line arguments
|
def fit_left_censoring(
    self,
    durations,
    event_observed=None,
    timeline=None,
    label=None,
    alpha=None,
    ci_labels=None,
    show_progress=False,
    entry=None,
    weights=None,
):  # pylint: disable=too-many-arguments
    """
    Fit the model to a left-censored dataset

    Parameters
    ----------
    durations: an array, or pd.Series
        length n, duration subject was observed for
    event_observed: numpy array or pd.Series, optional
        length n, True if the the death was observed, False if the event was lost (right-censored). Defaults all True if event_observed==None
    timeline: list, optional
        return the estimate at the values in timeline (positively increasing)
    label: string, optional
        a string to name the column of the estimate.
    alpha: float, optional
        the alpha value in the confidence intervals. Overrides the initializing
        alpha for this call to fit only.
    ci_labels: list, optional
        add custom column names to the generated confidence intervals as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<alpha>
    show_progress: boolean, optional
        since this is an iterative fitting algorithm, switching this to True will display some iteration details.
    entry: an array, or pd.Series, of length n
        relative time when a subject entered the study. This is useful for left-truncated (not left-censored) observations. If None, all members of the population
        entered study when they were "born": time zero.
    weights: an array, or pd.Series, of length n
        integer weights per observation

    Returns
    -------
    self
        self with new properties like ``cumulative_hazard_``, ``survival_function_``
    """
    # Validate the durations: numeric, finite and strictly positive.
    self.durations = np.asarray(pass_for_numeric_dtypes_or_raise_array(durations))
    check_nans_or_infs(self.durations)
    check_positivity(self.durations)
    # Record the censoring type, then delegate to the shared fitting
    # routine.  NOTE(review): the (None, durations) tuple presumably
    # encodes left-censored intervals (unknown lower bound) -- confirm
    # against _fit's contract.
    self._censoring_type = CensoringType.LEFT
    return self._fit(
        (None, self.durations),
        event_observed=event_observed,
        timeline=timeline,
        label=label,
        alpha=alpha,
        ci_labels=ci_labels,
        show_progress=show_progress,
        entry=entry,
        weights=weights,
    )
|
Fit the model to a left-censored dataset
Parameters
----------
durations: an array, or pd.Series
length n, duration subject was observed for
event_observed: numpy array or pd.Series, optional
length n, True if the the death was observed, False if the event was lost (right-censored). Defaults all True if event_observed==None
timeline: list, optional
return the estimate at the values in timeline (positively increasing)
label: string, optional
a string to name the column of the estimate.
alpha: float, optional
the alpha value in the confidence intervals. Overrides the initializing
alpha for this call to fit only.
ci_labels: list, optional
add custom column names to the generated confidence intervals as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<alpha>
show_progress: boolean, optional
since this is an iterative fitting algorithm, switching this to True will display some iteration details.
entry: an array, or pd.Series, of length n
relative time when a subject entered the study. This is useful for left-truncated (not left-censored) observations. If None, all members of the population
entered study when they were "born": time zero.
weights: an array, or pd.Series, of length n
integer weights per observation
Returns
-------
self
self with new properties like ``cumulative_hazard_``, ``survival_function_``
|
def _absorb_z_into_w(moment_index: int,
                     op: ops.Operation,
                     state: _OptimizerState) -> None:
    """Absorbs a Z^t gate into a W(a) flip.

    [Where W(a) is shorthand for PhasedX(phase_exponent=a).]

    Uses the following identity:
        ───W(a)───Z^t───
        ≡ ───W(a)───────────Z^t/2──────────Z^t/2─── (split Z)
        ≡ ───W(a)───W(a)───Z^-t/2───W(a)───Z^t/2─── (flip Z)
        ≡ ───W(a)───W(a)──────────W(a+t/2)───────── (phase W)
        ≡ ────────────────────────W(a+t/2)───────── (cancel Ws)
        ≡ ───W(a+t/2)───
    """
    half_turns = cast(float, _try_get_known_z_half_turns(op))
    qubit = op.qubits[0]
    # Fold half of the Z rotation into the held W phase, then schedule the
    # Z gate itself for deletion.
    current_phase = cast(float, state.held_w_phases[qubit])
    state.held_w_phases[qubit] = current_phase + half_turns / 2
    state.deletions.append((moment_index, op))
|
Absorbs a Z^t gate into a W(a) flip.
[Where W(a) is shorthand for PhasedX(phase_exponent=a).]
Uses the following identity:
───W(a)───Z^t───
≡ ───W(a)───────────Z^t/2──────────Z^t/2─── (split Z)
≡ ───W(a)───W(a)───Z^-t/2───W(a)───Z^t/2─── (flip Z)
≡ ───W(a)───W(a)──────────W(a+t/2)───────── (phase W)
≡ ────────────────────────W(a+t/2)───────── (cancel Ws)
≡ ───W(a+t/2)───
|
def make_subscriber(self, my_args=None):
    """Create, start and register a new zeromq subscriber actor proxy.

    :param my_args: subscriber arguments (required)
    :return: the started Subscriber proxy
    :raise exceptions.ArianeConfError: when my_args is not provided, or
        when the zeromq connection arguments are missing/not configured
    """
    LOGGER.debug("zeromq.Driver.make_subscriber")
    if my_args is None:
        raise exceptions.ArianeConfError('subscriber arguments')
    if not self.configuration_OK or self.connection_args is None:
        raise exceptions.ArianeConfError('zeromq connection arguments')
    subscriber = Subscriber.start(my_args, self.connection_args).proxy()
    self.subscribers_registry.append(subscriber)
    return subscriber
|
not implemented
:return:
|
def apply_filter(self, strings):
    """
    Run each configured text filter, in order, over the given strings.

    :param list strings: the list of input strings
    :return: the filtered list of strings
    """
    filtered = strings
    for current_filter in self.filters:
        filtered = current_filter.apply_filter(filtered)
    self.log([u"Applying regex: '%s' => '%s'", strings, filtered])
    return filtered
|
Apply the text filter filter to the given list of strings.
:param list strings: the list of input strings
|
def _get_login_manager(self,
                       app: FlaskUnchained,
                       anonymous_user: AnonymousUser,
                       ) -> LoginManager:
    """
    Get an initialized instance of Flask Login's
    :class:`~flask_login.LoginManager`.

    :param app: the application to initialize the login manager against
    :param anonymous_user: class used for anonymous users; falls back to
        ``AnonymousUser`` when falsy
    :return: the fully configured login manager
    """
    login_manager = LoginManager()
    # Fall back to the bundled AnonymousUser when none was supplied.
    login_manager.anonymous_user = anonymous_user or AnonymousUser
    # Use the bundle's lazy-translation callable for localized messages.
    login_manager.localize_callback = _
    # Wire the request loader and user loader to the security utils service.
    login_manager.request_loader(self._request_loader)
    login_manager.user_loader(
        lambda *a, **kw: self.security_utils_service.user_loader(*a, **kw))
    login_manager.login_view = 'security_controller.login'
    login_manager.login_message = _(
        'flask_unchained.bundles.security:error.login_required')
    login_manager.login_message_category = 'info'
    login_manager.needs_refresh_message = _(
        'flask_unchained.bundles.security:error.fresh_login_required')
    login_manager.needs_refresh_message_category = 'info'
    login_manager.init_app(app)
    return login_manager
|
Get an initialized instance of Flask Login's
:class:`~flask_login.LoginManager`.
|
def set_end_date(self, date):
    """Sets the end date.

    arg:    date (osid.calendaring.DateTime): the new date
    raise:  InvalidArgument - ``date`` is invalid
    raise:  NoAccess - ``Metadata.isReadOnly()`` is ``true``
    raise:  NullArgument - ``date`` is ``null``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Guard clauses mirror the documented error contract, in order:
    # null check, writability check, then validity check.
    if date is None:
        raise NullArgument('date cannot be None')
    if self.get_end_date_metadata().is_read_only():
        raise NoAccess()
    if not self.my_osid_object_form._is_valid_date_time(date, self.get_end_date_metadata()):
        raise InvalidArgument('date must be instance of DateTime')
    self.my_osid_object_form._my_map['endDate'] = date
|
Sets the end date.
arg: date (osid.calendaring.DateTime): the new date
raise: InvalidArgument - ``date`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``date`` is ``null``
*compliance: mandatory -- This method must be implemented.*
|
def example_exc_handler(tries_remaining, exception, delay):
    """Example exception handler; prints a warning to stderr.

    tries_remaining: The number of tries remaining.
    exception: The exception instance which was raised.
    delay: Seconds to sleep before the next retry (reported only).
    """
    # BUG FIX: the original used Python 2 `print >> stderr, ...` syntax,
    # which is a SyntaxError under Python 3; use print(..., file=stderr).
    print("Caught '{0}', {1} tries remaining, sleeping for {2} seconds"
          .format(exception, tries_remaining, delay), file=stderr)
|
Example exception handler; prints a warning to stderr.
tries_remaining: The number of tries remaining.
exception: The exception instance which was raised.
|
def build_notification_message(template_context, template_configuration=None):
    """
    Create HTML and plaintext message bodies for a notification.

    We receive a context with data we can use to render, as well as an optional
    site template configuration - if we don't get a template configuration,
    we'll use the standard, built-in template.

    Arguments:
        template_context (dict): A set of data to render
        template_configuration: A database-backed object with templates
            stored that can be used to render a notification.
    """
    has_custom_templates = (
        template_configuration is not None
        and template_configuration.html_template
        and template_configuration.plaintext_template
    )
    if has_custom_templates:
        plain_msg, html_msg = template_configuration.render_all_templates(template_context)
        return plain_msg, html_msg
    # Fall back to the standard, built-in templates.
    plain_msg = render_to_string(
        'enterprise/emails/user_notification.txt',
        template_context
    )
    html_msg = render_to_string(
        'enterprise/emails/user_notification.html',
        template_context
    )
    return plain_msg, html_msg
|
Create HTML and plaintext message bodies for a notification.
We receive a context with data we can use to render, as well as an optional site
template configuration - if we don't get a template configuration, we'll use the
standard, built-in template.
Arguments:
template_context (dict): A set of data to render
template_configuration: A database-backed object with templates
stored that can be used to render a notification.
|
def scan_file(self, filename, apikey):
    """
    Sends a file to virus total for assessment.

    :param filename: path of the file to submit
    :param apikey: VirusTotal API key
    :return: the parsed JSON response on HTTP 200, otherwise None
    """
    url = self.base_url + "file/scan"
    params = {'apikey': apikey}
    # BUG FIX: the file handle was opened and never closed; use a context
    # manager so it is released even if the request raises.
    with open(filename, 'rb') as file_handle:
        scanfile = {"file": file_handle}
        response = requests.post(url, files=scanfile, params=params)
    rate_limit_clear = self.rate_limit()
    if rate_limit_clear:
        if response.status_code == self.HTTP_OK:
            json_response = response.json()
            return json_response
        elif response.status_code == self.HTTP_RATE_EXCEEDED:
            # Back off when the API reports the rate limit was exceeded.
            time.sleep(20)
        else:
            self.logger.error("sent: %s, HTTP: %d", filename, response.status_code)
|
Sends a file to virus total for assessment
|
def check_docstring(cls):
    """
    Asserts that the class has a docstring, returning it if successful.
    """
    docstring = inspect.getdoc(cls)
    if not docstring:
        # Build a readable inheritance chain for the error message.
        lineage = " -> ".join(t.__name__ for t in inspect.getmro(cls)[:-1][::-1])
        raise InternalCashewException(
            "docstring required for plugin '%s' (%s, defined in %s)"
            % (cls.__name__, lineage, cls.__module__))
    limit = cls._class_settings.get('max-docstring-length')
    if limit:
        for line_no, line in enumerate(docstring.splitlines()):
            excess = len(line) - limit
            if excess > 0:
                raise Exception(
                    "docstring line %s of %s is %s chars too long"
                    % (line_no, cls.__name__, excess))
    return docstring
|
Asserts that the class has a docstring, returning it if successful.
|
def schunk(string, size):
    """Split *string* into consecutive chunks of at most *size* characters."""
    chunks = []
    for start in range(0, len(string), size):
        chunks.append(string[start:start + size])
    return chunks
|
Splits string into n sized chunks.
|
def __get_favorites(self, favorite_type, start=0, max_items=100):
    """ Helper method for `get_favorite_radio_*` methods.

    Args:
        favorite_type (str): Specify either `RADIO_STATIONS` or
            `RADIO_SHOWS`; any other value falls back to Sonos favorites.
        start (int): Which number to start the retrieval from. Used for
            paging.
        max_items (int): The total number of results to return.

    Returns:
        dict: with keys 'total' (matches reported by the device),
        'returned' (number of favorites parsed) and 'favorites' (list of
        dicts with 'title', 'uri' and, for Sonos favorites, 'meta').
    """
    # Anything other than the two radio types is treated as the generic
    # Sonos favorites container.
    if favorite_type not in (RADIO_SHOWS, RADIO_STATIONS):
        favorite_type = SONOS_FAVORITES
    # 'FV:2' is the Sonos favorites container; radio favorites live
    # under 'R:0/<type>'.
    response = self.contentDirectory.Browse([
        ('ObjectID',
         'FV:2' if favorite_type is SONOS_FAVORITES
         else 'R:0/{0}'.format(favorite_type)),
        ('BrowseFlag', 'BrowseDirectChildren'),
        ('Filter', '*'),
        ('StartingIndex', start),
        ('RequestedCount', max_items),
        ('SortCriteria', '')
    ])
    result = {}
    favorites = []
    results_xml = response['Result']
    if results_xml != '':
        # Favorites are returned in DIDL-Lite format
        metadata = XML.fromstring(really_utf8(results_xml))
        # Radio shows are DIDL containers; stations and Sonos favorites
        # are DIDL items.
        for item in metadata.findall(
                '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container'
                if favorite_type == RADIO_SHOWS else
                '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}item'):
            favorite = {}
            favorite['title'] = item.findtext(
                '{http://purl.org/dc/elements/1.1/}title')
            favorite['uri'] = item.findtext(
                '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}res')
            if favorite_type == SONOS_FAVORITES:
                favorite['meta'] = item.findtext(
                    '{urn:schemas-rinconnetworks-com:metadata-1-0/}resMD')
            favorites.append(favorite)
    result['total'] = response['TotalMatches']
    result['returned'] = len(favorites)
    result['favorites'] = favorites
    return result
|
Helper method for `get_favorite_radio_*` methods.
Args:
favorite_type (str): Specify either `RADIO_STATIONS` or
`RADIO_SHOWS`.
start (int): Which number to start the retrieval from. Used for
paging.
max_items (int): The total number of results to return.
|
def get_fetch_headers(self, method, headers):
    """Merge the class-level headers with the per-request headers.

    :param method: string, (eg, GET or POST), this is passed in so you can customize
        headers based on the method that you are calling
    :param headers: dict, all the headers passed into the fetch method
    :returns: passed in headers merged with global class headers
    """
    merged = self.headers.copy()
    merged.update(headers or {})
    return Headers(merged)
|
merge class headers with passed in headers
:param method: string, (eg, GET or POST), this is passed in so you can customize
headers based on the method that you are calling
:param headers: dict, all the headers passed into the fetch method
:returns: passed in headers merged with global class headers
|
def reinterpretBits(self, sigOrVal, toType):
    """
    Cast object of same bit size between to other type
    (f.e. bits to struct, union or array)

    :param sigOrVal: signal or value to cast
    :param toType: target HDL type
    :return: the reinterpreted signal/value
    """
    if isinstance(sigOrVal, Value):
        return reinterpretBits__val(self, sigOrVal, toType)
    elif isinstance(toType, Bits):
        return fitTo_t(sigOrVal, toType)
    elif sigOrVal._dtype.bit_length() == toType.bit_length():
        if isinstance(toType, HStruct):
            # BUG FIX: was `raise reinterpret_bits_to_hstruct(...)` --
            # the converted value must be returned, not raised.
            return reinterpret_bits_to_hstruct(sigOrVal, toType)
        elif isinstance(toType, HUnion):
            raise NotImplementedError()
        elif isinstance(toType, HArray):
            # BUG FIX: the result was computed and silently discarded;
            # return it instead of falling through to the default cast.
            return reinterpret_bits_to_harray(sigOrVal, toType)
    return default_auto_cast_fn(self, sigOrVal, toType)
|
Cast object of same bit size between to other type
(f.e. bits to struct, union or array)
|
def coord_pyramids(coords, zoom_start, zoom_stop):
    """
    Generate the full pyramid for each coord in *coords*.

    Note that zoom_stop is exclusive.
    """
    for base_coord in coords:
        yield from coord_pyramid(base_coord, zoom_start, zoom_stop)
|
generate full pyramid for coords
Generate the full pyramid for the list of coords. Note that zoom_stop is
exclusive.
|
def _filter_properties(obj, property_list):
"""
Remove properties from an instance or class that aren't in the
plist parameter
obj(:class:`~pywbem.CIMClass` or :class:`~pywbem.CIMInstance):
The class or instance from which properties are to be filtered
property_list(list of :term:`string`):
List of properties which are to be included in the result. If
None, remove nothing. If empty list, remove everything. else
remove properties that are not in property_list. Duplicated names
are allowed in the list and ignored.
"""
if property_list is not None:
property_list = [p.lower() for p in property_list]
for pname in obj.properties.keys():
if pname.lower() not in property_list:
del obj.properties[pname]
|
Remove properties from an instance or class that aren't in the
plist parameter
obj(:class:`~pywbem.CIMClass` or :class:`~pywbem.CIMInstance):
The class or instance from which properties are to be filtered
property_list(list of :term:`string`):
List of properties which are to be included in the result. If
None, remove nothing. If empty list, remove everything. else
remove properties that are not in property_list. Duplicated names
are allowed in the list and ignored.
|
def simulated_annealing(objective_function,
                        initial_array,
                        initial_temperature=10 ** 4,
                        cooldown_rate=0.7,
                        acceptance_criteria=None,
                        lower_bound=-float('inf'),
                        max_iterations=10 ** 3):
    """
    Implement a simulated annealing algorithm with exponential cooling.

    Has two stopping conditions:

    1. Maximum number of iterations;
    2. A known lower bound; if none is passed then this is not used.

    Note that an initial_temperature of zero corresponds to a hill climbing
    algorithm: uphill moves are then never accepted.
    """
    X = initial_array
    if acceptance_criteria is not None:
        # Candidates must not exceed the acceptance level of the initial
        # solution to be recorded as the best seen so far.
        acceptance_bound = acceptance_criteria(X)
    best_X = X
    iterations = 0
    current_energy = objective_function(X)
    best_energy = current_energy
    temperature = initial_temperature
    while current_energy > lower_bound and iterations <= max_iterations:
        iterations += 1
        candidate = element_from_neighbourhood(X)
        candidate_energy = objective_function(candidate)
        delta = candidate_energy - current_energy
        # Track the best acceptable solution encountered.
        if (candidate_energy < best_energy and
                (acceptance_criteria is None or
                 acceptance_criteria(candidate) <= acceptance_bound)):
            best_energy = candidate_energy
            best_X = candidate
        # Metropolis rule: always move downhill; move uphill with
        # probability exp(-delta / temperature) while temperature > 0.
        if delta < 0 or (temperature > 0 and
                         np.random.random() < np.exp(-delta / temperature)):
            X = candidate
            current_energy = candidate_energy
        # NOTE(review): multiplying by cooldown_rate ** iterations on every
        # pass yields an overall factor of cooldown_rate ** (k*(k+1)/2)
        # after k iterations -- far faster than plain exponential cooling
        # (`temperature *= cooldown_rate`).  Confirm this is intended.
        temperature *= (cooldown_rate) ** iterations
    if lower_bound > -float('inf') and current_energy != lower_bound:
        warnings.warn(f"Lower bound {lower_bound} not achieved after {max_iterations} iterations")
    return best_X
|
Implement a simulated annealing algorithm with exponential cooling
Has two stopping conditions:
1. Maximum number of iterations;
2. A known lower bound; if none is passed then this is not used.
Note that starting with an initial_temperature corresponds to a hill
climbing algorithm
|
def configure(self, options, conf):
    """Turn style-forcing on if bar-forcing is on.

    It'd be messy to position the bar but still have the rest of the
    terminal capabilities emit ''.
    """
    super(ProgressivePlugin, self).configure(options, conf)
    verbose_enough = getattr(options, 'verbosity', 0) > 1
    id_plugin_enabled = getattr(options, 'enable_plugin_id', False)
    if verbose_enough and id_plugin_enabled:
        # TODO: Can we forcibly disable the ID plugin?
        print ('Using --with-id and --verbosity=2 or higher with '
               'nose-progressive causes visualization errors. Remove one '
               'or the other to avoid a mess.')
    if options.with_bar:
        options.with_styling = True
|
Turn style-forcing on if bar-forcing is on.
It'd be messy to position the bar but still have the rest of the
terminal capabilities emit ''.
|
async def set_mode(self, mode, timeout=OTGW_DEFAULT_TIMEOUT):
    """
    Set the operating mode to either "Gateway" mode (:mode: =
    OTGW_MODE_GATEWAY or 1) or "Monitor" mode (:mode: =
    OTGW_MODE_MONITOR or 0), or use this method to reset the device
    (:mode: = OTGW_MODE_RESET).

    Return the newly activated mode, or the full renewed status
    dict after a reset.  Returns None when the gateway does not
    confirm the command within :timeout: seconds.

    This method is a coroutine
    """
    cmd = OTGW_CMD_MODE
    status = {}
    ret = await self._wait_for_cmd(cmd, mode, timeout)
    if ret is None:
        # No confirmation from the gateway within the timeout.
        return
    if mode is OTGW_MODE_RESET:
        # After a reset the cached status is stale: clear it, rebuild it
        # from fresh reports/status, and return a copy.
        self._protocol.status = {}
        await self.get_reports()
        await self.get_status()
        return dict(self._protocol.status)
    status[OTGW_MODE] = ret
    self._update_status(status)
    return ret
|
Set the operating mode to either "Gateway" mode (:mode: =
OTGW_MODE_GATEWAY or 1) or "Monitor" mode (:mode: =
OTGW_MODE_MONITOR or 0), or use this method to reset the device
(:mode: = OTGW_MODE_RESET).
Return the newly activated mode, or the full renewed status
dict after a reset.
This method is a coroutine
|
def could_scope_out(self):
    """
    Whether control could bubble up from the current scope.

    :return: bool
    """
    if not self.waiting_for:
        return True
    if isinstance(self.waiting_for, callable.EndOfStory):
        return True
    return self.is_breaking_a_loop()
|
could bubble up from current scope
:return:
|
def toDict(self):
    """To Dict

    Returns the Parent as a dictionary in the same format as is used in
    constructing it

    Returns:
        dict
    """
    # Start from the base class representation, then fold in each child
    # node keyed by its field name.
    result = super(Parent, self).toDict()
    for field_name, node in iteritems(self._nodes):
        result[field_name] = node.toDict()
    return result
|
To Dict
Returns the Parent as a dictionary in the same format as is used in
constructing it
Returns:
dict
|
def remove_unit_rules(grammar, inplace=False):
    # type: (Grammar, bool) -> Grammar
    """
    Remove unit rules from the grammar.

    :param grammar: Grammar where remove the rules.
    :param inplace: True if transformation should be performed in place. False by default.
    :return: Grammar without unit rules.
    """
    if inplace is False:
        grammar = copy(grammar)
    # Which nonterminals can reach which others through chains of unit rules.
    reachable = find_nonterminals_reachable_by_unit_rules(grammar)
    for rule in grammar.rules.copy():
        if _is_unit(rule):
            # Unit rules themselves are dropped outright.
            grammar.rules.remove(rule)
            continue
        for nonterm in grammar.nonterminals:
            path = reachable.path_rules(nonterm, rule.fromSymbol)
            # Skip empty paths and cyclic ones (start symbol == end symbol).
            if len(path) > 0 and path[0].fromSymbol != path[-1].toSymbol:
                grammar.rules.add(_create_rule(path, rule))
    return grammar
|
Remove unit rules from the grammar.
:param grammar: Grammar where remove the rules.
:param inplace: True if transformation should be performed in place. False by default.
:return: Grammar without unit rules.
|
def semantic_similarity(go_id1, go_id2, godag, branch_dist=None):
    '''
    Finds the semantic similarity (inverse of the semantic distance)
    between two GO terms.  Returns None when no distance is defined.
    '''
    distance = semantic_distance(go_id1, go_id2, godag, branch_dist)
    if distance is None:
        return None
    return 1.0 / float(distance)
|
Finds the semantic similarity (inverse of the semantic distance)
between two GO terms.
|
def model_performance(self, test_data=None, train=False, valid=False, xval=False):
    """
    Generate model metrics for this model on test_data.

    :param H2OFrame test_data: Data set for which model metrics shall be computed against. All three of train,
        valid and xval arguments are ignored if test_data is not None.
    :param bool train: Report the training metrics for the model.
    :param bool valid: Report the validation metrics for the model.
    :param bool xval: Report the cross-validation metrics for the model. If train and valid are True, then it
        defaults to True.
    :returns: An object of class H2OModelMetrics.
    """
    if test_data is None:
        if not train and not valid and not xval: train = True  # default to train
        if train: return self._model_json["output"]["training_metrics"]
        if valid: return self._model_json["output"]["validation_metrics"]
        if xval: return self._model_json["output"]["cross_validation_metrics"]
    else:  # cases dealing with test_data not None
        if not isinstance(test_data, h2o.H2OFrame):
            # BUG FIX: `"..." + type(test_data)` raised TypeError (str +
            # type) and masked the intended ValueError; stringify first.
            raise ValueError("`test_data` must be of type H2OFrame. Got: " + str(type(test_data)))
        # BUG FIX: use `is not None` rather than `!= None` for None checks.
        if (self._model_json["response_column_name"] is not None) and not(self._model_json["response_column_name"] in test_data.names):
            print("WARNING: Model metrics cannot be calculated and metric_json is empty due to the absence of the response column in your dataset.")
            return
        res = h2o.api("POST /3/ModelMetrics/models/%s/frames/%s" % (self.model_id, test_data.frame_id))
        # FIXME need to do the client-side filtering...  (PUBDEV-874)
        raw_metrics = None
        for mm in res["model_metrics"]:
            if mm["frame"] is not None and mm["frame"]["name"] == test_data.frame_id:
                raw_metrics = mm
                break
        return self._metrics_class(raw_metrics, algo=self._model_json["algo"])
|
Generate model metrics for this model on test_data.
:param H2OFrame test_data: Data set for which model metrics shall be computed against. All three of train,
valid and xval arguments are ignored if test_data is not None.
:param bool train: Report the training metrics for the model.
:param bool valid: Report the validation metrics for the model.
:param bool xval: Report the cross-validation metrics for the model. If train and valid are True, then it
defaults to True.
:returns: An object of class H2OModelMetrics.
|
def _smixins(self, name):
"""Inner wrapper to search for mixins by name.
"""
return (self._mixins[name] if name in self._mixins else False)
|
Inner wrapper to search for mixins by name.
|
def get_hostfirmware(self, callb=None):
    """Request the device firmware info, using the cached value when available.

    If the firmware version has already been retrieved from the device it is
    returned immediately. Otherwise a GetHostFirmware request is sent; when
    the StateHostFirmware response arrives, the default caching callback runs
    and, if supplied, ``callb`` is invoked as well.

    :param callb: Callable to be used when the response is received. If not
        set, only the default caching callback runs.
    :type callb: callable
    :returns: The cached (version, build_timestamp) pair; elements may still
        be None if the device has not answered yet.
    :rtype: tuple
    """
    if self.host_firmware_version is None:
        cache_resp = partial(self.resp_set_hostfirmware)
        if callb:
            handler = lambda x, y: (cache_resp(y), callb(x, y))
        else:
            handler = lambda x, y: cache_resp(y)
        self.req_with_resp(GetHostFirmware, StateHostFirmware, handler)
    return (self.host_firmware_version, self.host_firmware_build_timestamp)
|
Convenience method to request the device firmware info from the device
This method will check whether the value has already been retrieved from the device,
if so, it will simply return it. If no, it will request the information from the device
and request that callb be executed when a response is received. The default callback
will simply cache the value.
:param callb: Callable to be used when the response is received. If not set,
self.resp_set_label will be used.
:type callb: callable
:returns: The cached value
:rtype: str
|
def add_host(kwargs=None, call=None):
    '''
    Add a host system to the specified cluster or datacenter in this VMware environment

    .. note::

        To use this function, you need to specify ``esxi_host_user`` and
        ``esxi_host_password`` under your provider configuration set up at
        ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/vmware.conf``:

    .. code-block:: yaml

        vcenter01:
          driver: vmware
          user: 'DOMAIN\\user'
          password: 'verybadpass'
          url: 'vcenter01.domain.com'

          # Required when adding a host system
          esxi_host_user: 'root'
          esxi_host_password: 'myhostpassword'

          # Optional fields that can be specified when adding a host system
          esxi_host_ssl_thumbprint: '12:A3:45:B6:CD:7E:F8:90:A1:BC:23:45:D6:78:9E:FA:01:2B:34:CD'

    The SSL thumbprint of the host system can be optionally specified by setting
    ``esxi_host_ssl_thumbprint`` under your provider configuration. To get the SSL
    thumbprint of the host system, execute the following command from a remote
    server:

    .. code-block:: bash

        echo -n | openssl s_client -connect <YOUR-HOSTSYSTEM-DNS/IP>:443 2>/dev/null | openssl x509 -noout -fingerprint -sha1

    CLI Example:

    .. code-block:: bash

        salt-cloud -f add_host my-vmware-config host="myHostSystemName" cluster="myClusterName"
        salt-cloud -f add_host my-vmware-config host="myHostSystemName" datacenter="myDatacenterName"
    '''
    # Only usable as a salt-cloud --function, never as an action/instance call.
    if call != 'function':
        raise SaltCloudSystemExit(
            'The add_host function must be called with '
            '-f or --function.'
        )

    host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None
    cluster_name = kwargs.get('cluster') if kwargs and 'cluster' in kwargs else None
    datacenter_name = kwargs.get('datacenter') if kwargs and 'datacenter' in kwargs else None

    # ESXi credentials are read from the provider config, not from kwargs.
    host_user = config.get_cloud_config_value(
        'esxi_host_user', get_configured_provider(), __opts__, search_global=False
    )
    host_password = config.get_cloud_config_value(
        'esxi_host_password', get_configured_provider(), __opts__, search_global=False
    )
    host_ssl_thumbprint = config.get_cloud_config_value(
        'esxi_host_ssl_thumbprint', get_configured_provider(), __opts__, search_global=False
    )

    if not host_user:
        raise SaltCloudSystemExit(
            'You must specify the ESXi host username in your providers config.'
        )

    if not host_password:
        raise SaltCloudSystemExit(
            'You must specify the ESXi host password in your providers config.'
        )

    if not host_name:
        raise SaltCloudSystemExit(
            'You must specify either the IP or DNS name of the host system.'
        )

    # Exactly one of cluster/datacenter must be given (logical XOR).
    if (cluster_name and datacenter_name) or not(cluster_name or datacenter_name):
        raise SaltCloudSystemExit(
            'You must specify either the cluster name or the datacenter name.'
        )

    # Get the service instance
    si = _get_si()

    # Resolve the target managed object reference up front so a bad name
    # fails before we try to connect to the ESXi host itself.
    if cluster_name:
        cluster_ref = salt.utils.vmware.get_mor_by_property(si, vim.ClusterComputeResource, cluster_name)
        if not cluster_ref:
            raise SaltCloudSystemExit(
                'Specified cluster does not exist.'
            )

    if datacenter_name:
        datacenter_ref = salt.utils.vmware.get_mor_by_property(si, vim.Datacenter, datacenter_name)
        if not datacenter_ref:
            raise SaltCloudSystemExit(
                'Specified datacenter does not exist.'
            )

    spec = vim.host.ConnectSpec(
        hostName=host_name,
        userName=host_user,
        password=host_password,
    )

    if host_ssl_thumbprint:
        spec.sslThumbprint = host_ssl_thumbprint
    else:
        log.warning('SSL thumbprint has not been specified in provider configuration')
        # This smells like a not-so-good idea. A plenty of VMWare VCenters
        # do not listen to the default port 443.
        try:
            log.debug('Trying to get the SSL thumbprint directly from the host system')
            # Shell-free equivalent of:
            #   echo -n | openssl s_client -connect host:443 \
            #     | openssl x509 -noout -fingerprint -sha1
            p1 = subprocess.Popen(('echo', '-n'),
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
            p2 = subprocess.Popen(('openssl',
                                   's_client',
                                   '-connect',
                                   '{0}:443'.format(host_name)),
                                  stdin=p1.stdout,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
            p3 = subprocess.Popen(('openssl',
                                   'x509',
                                   '-noout',
                                   '-fingerprint',
                                   '-sha1'),
                                  stdin=p2.stdout,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
            # Output looks like "SHA1 Fingerprint=AB:CD:..."; keep the value.
            out = salt.utils.stringutils.to_str(p3.stdout.read())
            ssl_thumbprint = out.split('=')[-1].strip()
            log.debug('SSL thumbprint received from the host system: %s', ssl_thumbprint)
            spec.sslThumbprint = ssl_thumbprint
        except Exception as exc:
            log.error(
                'Error while trying to get SSL thumbprint of host %s: %s',
                host_name, exc,
                # Show the traceback if the debug logging level is enabled
                exc_info_on_loglevel=logging.DEBUG
            )
            return {host_name: 'failed to add host'}

    try:
        if cluster_name:
            task = cluster_ref.AddHost(spec=spec, asConnected=True)
            ret = 'added host system to cluster {0}'.format(cluster_name)
        if datacenter_name:
            task = datacenter_ref.hostFolder.AddStandaloneHost(spec=spec, addConnected=True)
            ret = 'added host system to datacenter {0}'.format(datacenter_name)
        # Block until vCenter reports the add-host task complete.
        salt.utils.vmware.wait_for_task(task, host_name, 'add host system', 5, 'info')
    except Exception as exc:
        if isinstance(exc, vim.fault.SSLVerifyFault):
            log.error('Authenticity of the host\'s SSL certificate is not verified')
            log.info('Try again after setting the esxi_host_ssl_thumbprint '
                     'to %s in provider configuration', spec.sslThumbprint)
        log.error(
            'Error while adding host %s: %s',
            host_name, exc,
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG
        )
        return {host_name: 'failed to add host'}

    return {host_name: ret}
|
Add a host system to the specified cluster or datacenter in this VMware environment
.. note::
To use this function, you need to specify ``esxi_host_user`` and
``esxi_host_password`` under your provider configuration set up at
``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/vmware.conf``:
.. code-block:: yaml
vcenter01:
driver: vmware
user: 'DOMAIN\\user'
password: 'verybadpass'
url: 'vcenter01.domain.com'
# Required when adding a host system
esxi_host_user: 'root'
esxi_host_password: 'myhostpassword'
# Optional fields that can be specified when adding a host system
esxi_host_ssl_thumbprint: '12:A3:45:B6:CD:7E:F8:90:A1:BC:23:45:D6:78:9E:FA:01:2B:34:CD'
The SSL thumbprint of the host system can be optionally specified by setting
``esxi_host_ssl_thumbprint`` under your provider configuration. To get the SSL
thumbprint of the host system, execute the following command from a remote
server:
.. code-block:: bash
echo -n | openssl s_client -connect <YOUR-HOSTSYSTEM-DNS/IP>:443 2>/dev/null | openssl x509 -noout -fingerprint -sha1
CLI Example:
.. code-block:: bash
salt-cloud -f add_host my-vmware-config host="myHostSystemName" cluster="myClusterName"
salt-cloud -f add_host my-vmware-config host="myHostSystemName" datacenter="myDatacenterName"
|
def plotOptMod(verNObg3gray, VERgray):
    """Plot filtered and unfiltered VER/flux profiles.

    called from either readTranscar.py or hist-feasibility/plotsnew.py

    Parameters
    ----------
    verNObg3gray : DataFrame-like or None
        UNfiltered VER/flux, indexed by ``alt_km`` with ``energy_ev`` columns.
    VERgray : DataFrame-like or None
        filtered VER/flux, same layout.

    Side effects only: creates matplotlib figures; returns nothing.
    """
    if VERgray is None and verNObg3gray is None:
        return

    fg = figure()
    ax2 = fg.gca()  # summed (as camera would see)

    if VERgray is not None:
        z = VERgray.alt_km
        Ek = VERgray.energy_ev.values
        # ax1.semilogx(VERgray, z, marker='',label='filt', color='b')
        props = {'boxstyle': 'round', 'facecolor': 'wheat', 'alpha': 0.5}

        # NOTE(review): Figure.subplots(6, 6) returns an Axes array; unpacking
        # it into (fgs, axs) looks suspect — verify against the matplotlib
        # version this was written for.
        fgs, axs = fg.subplots(6, 6, sharex=True, sharey='row')
        axs = axs.ravel()  # for convenient iteration
        fgs.subplots_adjust(hspace=0, wspace=0)
        fgs.suptitle('filtered VER/flux')
        fgs.text(0.04, 0.5, 'Altitude [km]', va='center', rotation='vertical')
        fgs.text(0.5, 0.04, 'Beam energy [eV]', ha='center')
        # One subplot per beam energy.
        for i, e in enumerate(Ek):
            axs[i].semilogx(VERgray.loc[:, e], z)
            axs[i].set_xlim((1e-3, 1e4))
            # place a text box in upper left in axes coords
            axs[i].text(0.95, 0.95, '{:0.0f}'.format(e)+'eV',
                        transform=axs[i].transAxes, fontsize=12,
                        va='top', ha='right', bbox=props)
        # Hide the unused trailing subplots of the 6x6 grid.
        for i in range(33, 36):
            axs[i].axis('off')

        ax2.semilogx(VERgray.sum(axis=1), z, label='filt', color='b')

        # specific to energies: all beams overlaid on one axes
        ax = figure().gca()
        for e in Ek:
            ax.semilogx(VERgray.loc[:, e], z, marker='', label='{:.0f} eV'.format(e))
        ax.set_title('filtered VER/flux')
        ax.set_xlabel('VER/flux')
        ax.set_ylabel('altitude [km]')
        ax.legend(loc='best', fontsize=8)
        ax.set_xlim((1e-5, 1e5))
        ax.grid(True)

    if verNObg3gray is not None:
        ax1 = figure().gca()  # overview
        z = verNObg3gray.alt_km
        Ek = verNObg3gray.energy_ev.values
        ax1.semilogx(verNObg3gray, z, marker='', label='unfilt', color='r')
        ax2.semilogx(verNObg3gray.sum(axis=1), z, label='unfilt', color='r')

        ax = figure().gca()
        for e in Ek:
            ax.semilogx(verNObg3gray.loc[:, e], z, marker='', label='{:.0f} eV'.format(e))
        ax.set_title('UNfiltered VER/flux')
        ax.set_xlabel('VER/flux')
        ax.set_ylabel('altitude [km]')
        ax.legend(loc='best', fontsize=8)
        ax.set_xlim((1e-5, 1e5))
        ax.grid(True)

        # ax1 only exists in this branch, so it is decorated here.
        ax1.set_title('VER/flux, one profile per beam')
        ax1.set_xlabel('VER/flux')
        ax1.set_ylabel('altitude [km]')
        ax1.grid(True)

    ax2.set_xlabel('VER/flux')
    ax2.set_ylabel('altitude [km]')
    ax2.set_title('VER/flux summed over all energy beams \n (as the camera would see)')
    ax2.legend(loc='best')
    ax2.grid(True)
|
called from either readTranscar.py or hist-feasibility/plotsnew.py
|
def remover(self, id_interface):
    """Remove an interface by its identifier.

    :param id_interface: Interface identifier.
    :return: Parsed response of the DELETE request.
    :raise InterfaceNaoExisteError: Interface doesn't exist.
    :raise InterfaceError: Interface is linked to another interface.
    :raise InvalidParameterError: The interface identifier is invalid or none.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    # Validate the identifier before issuing any request.
    if not is_valid_int_param(id_interface):
        raise InvalidParameterError(
            u'Interface id is invalid or was not informed.')

    url = 'interface/%s/' % id_interface
    code, xml = self.submit(None, 'DELETE', url)
    return self.response(code, xml)
|
Remove an interface by its identifier.
:param id_interface: Interface identifier.
:return: None
:raise InterfaceNaoExisteError: Interface doesn't exist.
:raise InterfaceError: Interface is linked to another interface.
:raise InvalidParameterError: The interface identifier is invalid or none.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
|
def parse(self, text):
    """Call the server and return the raw results.

    Bytes input is first decoded as ASCII; runs of whitespace are
    collapsed to single spaces and non-ASCII characters are
    transliterated (via unidecode) before the text is sent.

    :param text: str or bytes payload to parse
    :return: whatever ``self.communicate`` returns for the cleaned text
    """
    if isinstance(text, bytes):
        text = text.decode("ascii")
    # Raw string for the pattern: "\s" in a plain literal is an invalid
    # escape sequence (SyntaxWarning/DeprecationWarning on modern Python).
    text = re.sub(r"\s+", " ", unidecode(text))
    return self.communicate(text + "\n")
|
Call the server and return the raw results.
|
def _scale(x, min_x_value, max_x_value, output_min, output_max):
    """Scale a column to [output_min, output_max].

    Assumes the column's range is [min_x_value, max_x_value]. If this is not
    true at training or prediction time, the output value of this scale could
    be outside the range [output_min, output_max].

    Raises:
      ValueError: if min_x_value = max_x_value, as the column is constant.
    """
    # Round to 7 places (same tolerance as unittest's assertAlmostEqual) to
    # reject a (near-)constant column, which cannot be rescaled.
    if round(min_x_value - max_x_value, 7) == 0:
        raise ValueError('In make_scale_tito, min_x_value == max_x_value')

    def _do_scale(value):
        # Renamed from the outer function to avoid name shadowing.
        in_lo = tf.to_float(min_x_value)
        in_hi = tf.to_float(max_x_value)
        out_lo = tf.to_float(output_min)
        out_hi = tf.to_float(output_max)
        return ((tf.to_float(value) - in_lo) * (out_hi - out_lo)
                / (in_hi - in_lo) + out_lo)

    return _do_scale(x)
|
Scale a column to [output_min, output_max].
Assumes the columns's range is [min_x_value, max_x_value]. If this is not
true at training or prediction time, the output value of this scale could be
outside the range [output_min, output_max].
Raises:
ValueError: if min_x_value = max_x_value, as the column is constant.
|
def get_scripts():
    """Yield the names of custom npm scripts.

    Runs ``npm run-script`` and parses its output: script names appear on
    indented lines after the marker line announcing scripts available via
    ``npm run-script``.
    """
    proc = Popen(['npm', 'run-script'], stdout=PIPE)
    in_scripts_section = False
    for raw_line in proc.stdout.readlines():
        decoded = raw_line.decode()
        if 'available via `npm run-script`:' in decoded:
            in_scripts_section = True
        elif in_scripts_section and re.match(r'^ [^ ]+', decoded):
            yield decoded.strip().split(' ')[0]
|
Get custom npm scripts.
|
def DeserializeUnsignedWithoutType(self, reader):
    """
    Deserialize object without reading transaction type data.

    Reads, in wire order: version byte, type-specific payload, attributes,
    coin-reference inputs, then transaction outputs.

    Args:
        reader (neo.IO.BinaryReader):
    """
    self.Version = reader.ReadByte()
    # Subclasses read their type-specific payload here.
    self.DeserializeExclusiveData(reader)
    # Attribute count is capped to guard against malformed input.
    self.Attributes = reader.ReadSerializableArray('neo.Core.TX.TransactionAttribute.TransactionAttribute',
                                                   max=self.MAX_TX_ATTRIBUTES)
    self.inputs = reader.ReadSerializableArray('neo.Core.CoinReference.CoinReference')
    self.outputs = reader.ReadSerializableArray('neo.Core.TX.Transaction.TransactionOutput')
|
Deserialize object without reading transaction type data.
Args:
reader (neo.IO.BinaryReader):
|
def _tp_finder(self, dcycle):  # Private routine
    """
    Routine to find thermal pulses in given star and returns an
    index vector that gives the cycle number in which the thermal
    pulse occurs.

    The routine looks for the C/O ratio jumping up and up, so only
    useful in TP-AGB star. A vector is given back that indicates
    the position of the cycle that is at 95% of the thermal pulse
    (to make sure it's not in the next one and that most of the
    processing is done). The script also returns the co_ratio
    vector - the C/O ratio (number fraction) at the given thermal
    pulse.

    :param dcycle: cycle stride used when sampling the model output.
    :return: (tp_pos, co_return) — cycle numbers at 95% through each
        pulse, and the C/O ratio at each detected pulse start.
    """
    # read in c and o isotopes for all cycles, regarding deltacycle
    last_cycle = int(self.se.cycles[len(self.se.cycles)-1])
    cyc_tp = list(range(1, last_cycle + dcycle, dcycle))
    all_data = array(self.get(cyc_tp, ['C-12', 'C-13', 'O-16', 'O-17', 'O-18']))
    # Sum the isotopes into total carbon and oxygen number fractions.
    c_nf = np.zeros(len(all_data))
    o_nf = np.zeros(len(all_data))
    for i in range(len(all_data)):
        c_nf[i] = all_data[i][0] + all_data[i][1]
        o_nf[i] = all_data[i][2] + all_data[i][3] + all_data[i][4]

    # search for thermal pulses
    # 15.9994/12.0107 converts the mass-fraction ratio to a number ratio.
    co_ratio = (old_div(c_nf, o_nf)) * 15.9994 / 12.0107
    tp_guess = 200  # this should be an upper limit!
    tp_guess_max = 200  # to through an error
    # guess variables, i is the actual break criterion, n a max counter
    gi = 0
    gn = 0
    # Iteratively halve tp_guess until the number of detected pulses is
    # within a factor of two of the guess (or we give up after 10000 tries).
    while gi != 1 and gn < 10000:
        tp_ind = list()
        i = 0
        while i < len(co_ratio)-2:
            # Detection: slope rises above the threshold then falls below it
            # while the ratio itself is still increasing.
            gcompar = old_div(1., (dcycle*tp_guess*100.))
            slope1 = old_div((co_ratio[i+1]-co_ratio[i]), (dcycle))
            slope2 = old_div((co_ratio[i+2]-co_ratio[i+1]), dcycle)
            if slope1 > gcompar and slope2 < gcompar and co_ratio[i+1] > co_ratio[i]:
                tp_ind.append(i+1)
                i += 3  # jump three cycles to avoid defining a single cycle twice!
            else:
                i += 1
        if abs(len(tp_ind) - tp_guess) < old_div(tp_guess, 2):  # gotta be within factor two of guess
            gi = 1
        else:
            gn += 1
            tp_guess /= 2

    # check w/ maximum of thermal pulses allowed
    if len(tp_ind) > tp_guess_max:
        print('Problem detected with number of pulses')

    # create thermal pulse vector
    tp_startf = zeros(len(tp_ind))  # found start
    for i in range(len(tp_startf)):
        tp_startf[i] = cyc_tp[tp_ind[i]]

    # read out isotopic composition at 95% of the thermal pulse and the initial of the star
    # set up thermal pulse positions
    tp_limits = zeros(len(tp_startf)+1)
    for i in range(len(tp_startf)):
        tp_limits[i] = tp_startf[i]
    tp_limits[len(tp_limits)-1] = int(self.se.cycles[len(self.se.cycles)-1])

    # thermal pulse position (where to read the isotope ratio)
    tp_pos = list()
    for i in range(len(tp_startf)):
        tp_pos.append(int(tp_limits[i] + 0.95 * (tp_limits[i+1] - tp_limits[i])))

    # create co_ret vector to return c/o ratio vector
    co_return = zeros(len(tp_pos))
    for i in range(len(tp_pos)):
        co_return[i] = co_ratio[tp_ind[i]]

    # return the two vectors
    return tp_pos, co_return
|
Routine to find thermal pulses in given star and returns an
index vector that gives the cycle number in which the thermal
pulse occure.
The routine looks for the C/O ratio jumping up and up, so only
useful in TP-AGB star. A vector is given back that indicates
the position of the cycle that is at 95% of the thermal pulse
(to make sure it's not in the next one and that most of the
processing is done). The script also returns the co_ratio
vector - the C/O ratio (number fraction) at the given thermal
pulse.
|
def getBlock(self, block_identifier, full_transactions=False):
    """Fetch a block via `eth_getBlockByHash` / `eth_getBlockByNumber`.

    The RPC method is chosen from the kind of identifier given
    (predefined alias, hash, or number). Raises BlockNotFound when the
    node returns null for the identifier.
    """
    rpc_method = select_method_for_block_identifier(
        block_identifier,
        if_predefined='eth_getBlockByNumber',
        if_hash='eth_getBlockByHash',
        if_number='eth_getBlockByNumber',
    )
    block = self.web3.manager.request_blocking(
        rpc_method,
        [block_identifier, full_transactions],
    )
    if block is None:
        raise BlockNotFound(f"Block with id: {block_identifier} not found.")
    return block
|
`eth_getBlockByHash`
`eth_getBlockByNumber`
|
def get_summary(self):
    """ Return the function summary

    Returns:
        (str, list, list, list, list): (name, inheritance, variables,
        function summaries, modifier summaries)
    """
    inheritance_names = [str(parent) for parent in self.inheritance]
    variable_names = [str(var) for var in self.variables]
    function_summaries = [func.get_summary() for func in self.functions]
    modifier_summaries = [mod.get_summary() for mod in self.modifiers]
    return (self.name, inheritance_names, variable_names,
            function_summaries, modifier_summaries)
|
Return the function summary
Returns:
(str, list, list, list, list): (name, inheritance, variables, function summaries, modifier summaries)
|
async def reset_webhook(self, check=True) -> bool:
    """
    Reset webhook

    :param check: when True, skip the delete call if no webhook is set
    :return: result of delete_webhook, or False when skipped
    """
    if check:
        info = await self.bot.get_webhook_info()
        if not info.url:
            # Nothing registered — nothing to delete.
            return False
    return await self.bot.delete_webhook()
|
Reset webhook
:param check: check before deleting
:return:
|
def MobileDeviceProvisioningProfile(self, data=None, subset=None):
    """{dynamic_docstring}"""
    # Delegate object retrieval to the shared factory.
    obj_class = jssobjects.MobileDeviceProvisioningProfile
    return self.factory.get_object(obj_class, data, subset)
|
{dynamic_docstring}
|
def convert_uuid(self, in_uuid: str, mode: int = 0):
    """Convert a metadata UUID to its URI equivalent. And conversely.

    :param str in_uuid: UUID or URI to convert
    :param int mode: conversion direction. Options:

      * 0 to HEX
      * 1 to URN (RFC4122)
      * 2 to URN (Isogeo specific style)

    :raises TypeError: if ``in_uuid`` is not a str or ``mode`` not an int
    :raises ValueError: if ``in_uuid`` is not a valid UUID or ``mode`` is
        not one of 0, 1, 2
    """
    # parameters check
    # NOTE: the previous signature defaulted in_uuid to the *str type object*
    # and annotated mode as bool — both clearly wrong; in_uuid is now a
    # required str and mode is an int. Calls that passed arguments behave
    # exactly as before; a call with no argument still raises TypeError.
    if not isinstance(in_uuid, str):
        raise TypeError("'in_uuid' expected a str value.")
    if not checker.check_is_uuid(in_uuid):
        raise ValueError("{} is not a correct UUID".format(in_uuid))
    if not isinstance(mode, int):
        raise TypeError("'mode' expects an integer value")
    # handle Isogeo specific UUID in XML exports
    if "isogeo:metadata" in in_uuid:
        in_uuid = "urn:uuid:{}".format(in_uuid.split(":")[-1])
        logging.debug("Isogeo UUUID URN spotted: {}".format(in_uuid))
    # operate
    if mode == 0:
        return uuid.UUID(in_uuid).hex
    elif mode == 1:
        return uuid.UUID(in_uuid).urn
    elif mode == 2:
        urn = uuid.UUID(in_uuid).urn
        return "urn:isogeo:metadata:uuid:{}".format(urn.split(":")[2])
    else:
        raise ValueError("'mode' must be one of: 0 | 1 | 2")
|
Convert a metadata UUID to its URI equivalent. And conversely.
:param str in_uuid: UUID or URI to convert
:param int mode: conversion direction. Options:
* 0 to HEX
* 1 to URN (RFC4122)
* 2 to URN (Isogeo specific style)
|
def url_defaults(self, fn):
    """
    Callback function for URL defaults for this bundle. It's called
    with the endpoint and values and should update the values passed
    in place.
    """
    # Defer the registration until the blueprint exists, then hand the
    # callback straight back so this works as a decorator.
    def register(bp):
        return bp.url_defaults(fn)

    self._defer(register)
    return fn
|
Callback function for URL defaults for this bundle. It's called
with the endpoint and values and should update the values passed
in place.
|
def readB1header(filename):
    """Read beamline B1 (HASYLAB, Hamburg) header data

    Input
    -----
    filename: string
        the file name. If ends with ``.gz``, it is fed through a ``gunzip``
        filter

    Output
    ------
    A header dictionary.

    Examples
    --------
    read header data from 'ORG000123.DAT'::

        header=readB1header('ORG00123.DAT')
    """
    # Planck's constant times speed of light: incorrect
    # constant in the old program on hasjusi1, which was
    # taken over by the measurement program, to keep
    # compatibility with that.
    hed = {}
    if libconfig.LENGTH_UNIT == 'A':
        jusifaHC = 12396.4
    elif libconfig.LENGTH_UNIT == 'nm':
        jusifaHC = 1239.64
    else:
        raise NotImplementedError(
            'Invalid length unit: ' + str(libconfig.LENGTH_UNIT))
    # Transparently decompress gzipped headers.
    if filename.upper().endswith('.GZ'):
        fid = gzip.GzipFile(filename, 'r')
    else:
        fid = open(filename, 'rt')
    lines = fid.readlines()
    fid.close()
    # The B1 header is a fixed-layout text file: each field lives on a
    # known (0-based) line number.
    hed['FSN'] = int(lines[0].strip())
    hed['Hour'] = int(lines[17].strip())
    hed['Minutes'] = int(lines[18].strip())
    hed['Month'] = int(lines[19].strip())
    hed['Day'] = int(lines[20].strip())
    hed['Year'] = int(lines[21].strip()) + 2000
    hed['FSNref1'] = int(lines[23].strip())
    hed['FSNdc'] = int(lines[24].strip())
    hed['FSNsensitivity'] = int(lines[25].strip())
    hed['FSNempty'] = int(lines[26].strip())
    hed['FSNref2'] = int(lines[27].strip())
    hed['Monitor'] = float(lines[31].strip())
    hed['Anode'] = float(lines[32].strip())
    hed['MeasTime'] = float(lines[33].strip())
    hed['Temperature'] = float(lines[34].strip())
    hed['BeamPosX'] = float(lines[36].strip())
    hed['BeamPosY'] = float(lines[37].strip())
    hed['Transm'] = float(lines[41].strip())
    hed['Wavelength'] = float(lines[43].strip())
    # Energy derived from wavelength via the (historical) hc constant above.
    hed['Energy'] = jusifaHC / hed['Wavelength']
    hed['Dist'] = float(lines[46].strip())
    # Stored as reciprocal pixel sizes in the file.
    hed['XPixel'] = 1 / float(lines[49].strip())
    hed['YPixel'] = 1 / float(lines[50].strip())
    hed['Title'] = lines[53].strip().replace(' ', '_').replace('-', '_')
    hed['MonitorDORIS'] = float(lines[56].strip())  # aka. DORIS counter
    hed['Owner'] = lines[57].strip()
    hed['RotXSample'] = float(lines[59].strip())
    hed['RotYSample'] = float(lines[60].strip())
    hed['PosSample'] = float(lines[61].strip())
    hed['DetPosX'] = float(lines[62].strip())
    hed['DetPosY'] = float(lines[63].strip())
    hed['MonitorPIEZO'] = float(lines[64].strip())  # aka. PIEZO counter
    hed['BeamsizeX'] = float(lines[66].strip())
    hed['BeamsizeY'] = float(lines[67].strip())
    hed['PosRef'] = float(lines[70].strip())
    hed['Monochromator1Rot'] = float(lines[77].strip())
    hed['Monochromator2Rot'] = float(lines[78].strip())
    hed['Heidenhain1'] = float(lines[79].strip())
    hed['Heidenhain2'] = float(lines[80].strip())
    hed['Current1'] = float(lines[81].strip())
    hed['Current2'] = float(lines[82].strip())
    hed['Detector'] = 'Unknown'
    hed['PixelSize'] = (hed['XPixel'] + hed['YPixel']) / 2.0
    # Counting statistics: Poisson errors (sqrt of counts).
    hed['AnodeError'] = math.sqrt(hed['Anode'])
    hed['TransmError'] = 0
    hed['MonitorError'] = math.sqrt(hed['Monitor'])
    hed['MonitorPIEZOError'] = math.sqrt(hed['MonitorPIEZO'])
    hed['MonitorDORISError'] = math.sqrt(hed['MonitorDORIS'])
    hed['Date'] = datetime.datetime(
        hed['Year'], hed['Month'], hed['Day'], hed['Hour'], hed['Minutes'])
    hed['__Origin__'] = 'B1 original'
    hed['__particle__'] = 'photon'
    return hed
|
Read beamline B1 (HASYLAB, Hamburg) header data
Input
-----
filename: string
the file name. If ends with ``.gz``, it is fed through a ``gunzip``
filter
Output
------
A header dictionary.
Examples
--------
read header data from 'ORG000123.DAT'::
header=readB1header('ORG00123.DAT')
|
def extract_entry(self, e, decompress='auto'):
    """Yield blocks of data for this entry from this MAR file.

    Args:
        e (:obj:`mardor.format.index_entry`): An index_entry object that
            refers to this file's size and offset inside the MAR file.
        decompress (str, optional): Controls whether files are decompressed
            when extracted. Must be one of None, 'auto', 'bz2', or 'xz'.
            Defaults to 'auto'

    Yields:
        Blocks of data for `e`
    """
    self.fileobj.seek(e.offset)
    raw_stream = takeexactly(file_iter(self.fileobj), e.size)

    # Map each decompression mode onto the stream wrapper to apply;
    # None means pass the raw bytes through untouched.
    wrappers = {
        'auto': auto_decompress_stream,
        'bz2': bz2_decompress_stream,
        'xz': xz_decompress_stream,
        None: lambda stream: stream,
    }
    if decompress not in wrappers:
        raise ValueError("Unsupported decompression type: {}".format(decompress))

    for block in wrappers[decompress](raw_stream):
        yield block
|
Yield blocks of data for this entry from this MAR file.
Args:
e (:obj:`mardor.format.index_entry`): An index_entry object that
refers to this file's size and offset inside the MAR file.
path (str): Where on disk to extract this file to.
decompress (str, optional): Controls whether files are decompressed
when extracted. Must be one of None, 'auto', 'bz2', or 'xz'.
Defaults to 'auto'
Yields:
Blocks of data for `e`
|
def category(self, category_id, country=None, locale=None):
    """Get a single category used to tag items in Spotify.

    Parameters
    ----------
    category_id : str
        The Spotify category ID for the category.
    country : COUNTRY_TP
        COUNTRY
    locale : LOCALE_TP
        LOCALE
    """
    # Only forward the optional query parameters that were actually given.
    params = {key: value
              for key, value in (('country', country), ('locale', locale))
              if value}
    route = Route('GET', '/browse/categories/{category_id}',
                  category_id=category_id)
    return self.request(route, params=params)
|
Get a single category used to tag items in Spotify.
Parameters
----------
category_id : str
The Spotify category ID for the category.
country : COUNTRY_TP
COUNTRY
locale : LOCALE_TP
LOCALE
|
def read_config(self):
    """ reads config

    Populates protocol, TLS and address attributes from the tool's
    options, resolves the target address (optionally testing the
    connection), and delegates load-profile options to the stepper
    wrapper.
    """
    # multi-options
    self.ssl = self.get_option("ssl")
    self.tank_type = self.get_option("tank_type")
    # TODO: refactor. Maybe we should decide how to interact with
    # StepperWrapper here.
    # self.instances = self.get_option('instances')
    # gatling_ip option is multi-line; flatten it into one space-separated string.
    self.gatling = ' '.join(self.get_option('gatling_ip').split("\n"))
    self.method_prefix = self.get_option("method_prefix")
    self.method_options = self.get_option("method_options")
    self.source_log_prefix = self.get_option("source_log_prefix")

    self.phantom_http_line = self.get_option("phantom_http_line")
    self.phantom_http_field_num = self.get_option("phantom_http_field_num")
    self.phantom_http_field = self.get_option("phantom_http_field")
    self.phantom_http_entity = self.get_option("phantom_http_entity")

    self.address = self.get_option('address')
    do_test_connect = self.get_option("connection_test")
    explicit_port = self.get_option('port', '')
    # The wizard resolves DNS, detects IPv6 and may test connectivity;
    # it can also rewrite self.address.
    self.ipv6, self.resolved_ip, self.port, self.address = self.address_wizard.resolve(
        self.address, do_test_connect, explicit_port)

    logger.info(
        "Resolved %s into %s:%s", self.address, self.resolved_ip, self.port)

    self.client_cipher_suites = self.get_option("client_cipher_suites", "")
    self.client_certificate = self.get_option("client_certificate", "")
    self.client_key = self.get_option("client_key", "")
    self.stepper_wrapper.read_config()
|
reads config
|
def arrow_get(string):
    '''this function exists because ICS uses ISO 8601 without dashes or
    colons, i.e. not ISO 8601 at all.'''
    # Normalise date separators first: some feeds use slashes.
    normalized = string.replace('/', '-') if '/' in string else string
    # With dashes present, assume the value is proper ISO 8601.
    if '-' in normalized:
        return arrow.get(normalized)
    # Compact form: drop a trailing Z and pick the format by length.
    compact = normalized.rstrip('Z')
    return arrow.get(compact, DATE_FORMATS[len(compact)])
|
this function exists because ICS uses ISO 8601 without dashes or
colons, i.e. not ISO 8601 at all.
|
def _process_diseasegene(self, limit):
    """Process the Orphanet disease-gene association file.

    Streams the XML with iterparse, adds each disorder and its associated
    genes (with cross-references, synonyms and evidence) to the graph.

    Fixes over the previous revision:
      * the mismatch warning used '%' instead of '%s' for disorder_id,
        producing a broken log message;
      * line_counter was compared against `limit` but never incremented,
        so the limit never took effect in test mode.

    :param limit: in test mode, stop after this many Disorder elements
        (None for no limit).
    :return: None
    """
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    line_counter = 0
    model = Model(graph)
    myfile = '/'.join((self.rawdir, self.files['disease-gene']['file']))

    for event, elem in ET.iterparse(myfile):
        if elem.tag == 'Disorder':
            # Count every disorder seen so the `limit` check below works.
            line_counter += 1
            # get the element name and id, ignore element name
            # id = elem.get('id') # some internal identifier
            disorder_num = elem.find('OrphaNumber').text
            disorder_id = 'ORPHA:' + str(disorder_num)

            if self.test_mode and disorder_id not in self.all_test_ids['disease']:
                continue

            disorder_label = elem.find('Name').text
            # assuming that these are in the ontology (...any particular one?)
            model.addClassToGraph(disorder_id, disorder_label)
            assoc_list = elem.find('DisorderGeneAssociationList')
            expected_genes = assoc_list.get('count')
            LOG.info(
                'Expecting %s genes associated with disorder %s.',
                expected_genes, disorder_id)
            processed_genes = 0
            for assoc in assoc_list.findall('DisorderGeneAssociation'):
                processed_genes += 1
                gene = assoc.find('Gene')

                # get gene's curie HGNC or Ensembl ...
                lclid = gene.find('OrphaNumber').text
                gene_curie = 'ORPHA:' + lclid
                gene_set = {'ORPHA': lclid}
                for gene_ref in gene.findall(
                        './ExternalReferenceList/ExternalReference'):
                    gene_set[gene_ref.find('Source').text] = \
                        gene_ref.find('Reference').text

                # set priority (clique leader if available) but default to OPRHA
                for pfx in ('HGNC', 'Ensembl', 'SwissProt'):
                    if pfx in gene_set:
                        if pfx in self.localtt:
                            pfx = self.localtt[pfx]
                        gene_curie = pfx + ':' + gene_set[pfx]
                        gene_set.pop(pfx)
                        model.addClassToGraph(gene_curie, None)
                        break

                # TEC have reservations w.r.t aggerator links being gene classes
                for prefix in gene_set:
                    lclid = gene_set[prefix]
                    if prefix in self.localtt:
                        prefix = self.localtt[prefix]
                    dbxref = prefix + ':' + lclid
                    if gene_curie != dbxref:
                        model.addClassToGraph(dbxref, None)
                        model.addEquivalentClass(gene_curie, dbxref)

                # TEC. would prefer this not happen here. let HGNC handle it
                # except there are some w/o explicit external links ...
                # NOTE: gene_symbol is unused below, but the .text access also
                # validates that a Symbol element is present.
                gene_symbol = gene.find('Symbol').text

                syn_list = gene.find('./SynonymList')
                if int(syn_list.get('count')) > 0:
                    for syn in syn_list.findall('./Synonym'):
                        model.addSynonym(gene_curie, syn.text)

                dg_label = assoc.find('./DisorderGeneAssociationType/Name').text

                # use dg association status to issue an evidence code
                # FIXME I think that these codes are sub-optimal
                eco_id = self.resolve(
                    assoc.find('DisorderGeneAssociationStatus/Name').text)
                rel_id = self.resolve(dg_label)

                g2p_assoc = G2PAssoc(self.graph, self.name, gene_curie, disorder_id, rel_id)
                g2p_assoc.add_evidence(eco_id)
                g2p_assoc.add_association_to_graph()

            elem.clear()  # empty the element

            if int(expected_genes) != processed_genes:
                LOG.warning(
                    '%s expected %s associated genes but we processed %i',
                    disorder_id, expected_genes, processed_genes)

            if self.test_mode and limit is not None and line_counter > limit:
                return
    return
|
:param limit:
:return:
|
def filter(self, data, collection, **kwargs):
    """Filter given collection.

    Applies every configured filter whose name appears in ``data`` and
    returns ``(applied_ops, collection)``; ``applied_ops`` is ``None``
    when there is nothing to filter.
    """
    if not data or self.filters is None:
        return None, collection

    applied = {}
    for flt in self.filters:
        if flt.name in data:
            ops, collection = flt.filter(collection, data, **kwargs)
            applied[flt.name] = ops

    return applied, collection
|
Filter given collection.
|
def is_valid_python(tkn: str) -> bool:
    """Determine whether tkn is a valid python identifier

    :param tkn: candidate token
    :return: True iff tkn parses as exactly one bare-name expression
    """
    try:
        module = ast.parse(tkn)
    except SyntaxError:
        return False
    if len(module.body) != 1:
        return False
    stmt = module.body[0]
    return isinstance(stmt, ast.Expr) and isinstance(stmt.value, ast.Name)
|
Determine whether tkn is a valid python identifier
:param tkn:
:return:
|
def _convert_connected_app(self):
    """Convert Connected App to service

    Migrates a legacy connected-app configuration into a
    "connected_app" service entry. No-ops when the service already
    exists or when no connected app is configured; otherwise logs a
    deprecation warning and registers the service.
    """
    if self.services and "connected_app" in self.services:
        # already a service
        return
    connected_app = self.get_connected_app()
    if not connected_app:
        # not configured
        return
    self.logger.warning(
        "Reading Connected App info from deprecated config."
        " Connected App should be changed to a service."
        " If using environment keychain, update the environment variable."
        " Otherwise, it has been handled automatically and you should not"
        " see this message again."
    )
    # Repack the legacy fields into the service-config shape.
    ca_config = ServiceConfig(
        {
            "callback_url": connected_app.callback_url,
            "client_id": connected_app.client_id,
            "client_secret": connected_app.client_secret,
        }
    )
    self.set_service("connected_app", ca_config)
|
Convert Connected App to service
|
def _handle_upsert(self, parts, unwritten_lobs=()):
    """Handle reply messages from INSERT or UPDATE statements

    :param parts: iterable of reply-message parts received from the server
    :param unwritten_lobs: sequence of LobBuffer instances whose data was
        not (fully) sent with the original request; matched positionally
        against the locator ids of a WRITELOBREPLY part.
    :raises InterfaceError: if a part of an unexpected kind is encountered.
    """
    # An upsert produces no result set; clear any stale description.
    self.description = None
    self._received_last_resultset_part = True  # set to 'True' so that cursor.fetch*() returns just empty list
    for part in parts:
        if part.kind == part_kinds.ROWSAFFECTED:
            self.rowcount = part.values[0]
        elif part.kind in (part_kinds.TRANSACTIONFLAGS, part_kinds.STATEMENTCONTEXT, part_kinds.PARAMETERMETADATA):
            # Informational parts; nothing to do for an upsert.
            pass
        elif part.kind == part_kinds.WRITELOBREPLY:
            # This part occurrs after lobs have been submitted not at all or only partially during an insert.
            # In this case the parameter part of the Request message contains a list called 'unwritten_lobs'
            # with LobBuffer instances.
            # Those instances are in the same order as 'locator_ids' received in the reply message. These IDs
            # are then used to deliver the missing LOB data to the server via WRITE_LOB_REQUESTs.
            for lob_buffer, lob_locator_id in izip(unwritten_lobs, part.locator_ids):
                # store locator_id in every lob buffer instance for later reference:
                lob_buffer.locator_id = lob_locator_id
            self._perform_lob_write_requests(unwritten_lobs)
        else:
            raise InterfaceError("Prepared insert statement response, unexpected part kind %d." % part.kind)
    self._executed = True
|
Handle reply messages from INSERT or UPDATE statements
|
def get_form(self, **kwargs):
    """Instantiate and return the registration/invitation form.

    :raises AttributeError: when the view defines no ``form_class``.
    """
    try:
        form_class = self.form_class
    except AttributeError:
        raise AttributeError(_("You must define a form_class"))
    return form_class(**kwargs)
|
Returns the form for registering or inviting a user
|
def _selection_by_callable(self, view, num_slices, non_empty_slices):
"""Returns all the slices selected by the given callable."""
selected = [sl for sl in non_empty_slices
if self._sampler(self._get_axis(self._image, view, sl))]
return selected[:num_slices]
|
Returns all the slices selected by the given callable.
|
def transform(self, X):
    """Embed new points X into an already-fitted model.

    Parameters
    ----------
    X : array, shape (n_samples, n_features)
        New data; must have the original feature dimension so it can be
        projected with ``self.v``.

    Returns
    -------
    iclustup : np.ndarray
        Upsampled embedding positions for each sample. An empty array is
        returned (and an error message printed) when the model was not fit
        or X has the wrong number of features.
    """
    # Start from an empty ndarray so the post-processing below is safe on
    # the error branches too (the original plain list has no .ndim and
    # crashed with AttributeError, masking the printed error message).
    iclustup = np.zeros(0)
    dims = self.n_components
    if hasattr(self, 'isort1'):
        if X.shape[1] == self.v.shape[0]:
            # reduce dimensionality of X
            X = X @ self.v
            nclust = self.n_X
            AtS = self.A.T @ self.S
            vnorm = np.sum(self.S * (self.A @ AtS), axis=0)[np.newaxis,:]
            cv = X @ AtS
            # non-negative correlation map, squared and normalized
            cmap = np.maximum(0., cv)**2 / vnorm
            iclustup, cmax = upsample(np.sqrt(cmap), dims, nclust, 10)
        else:
            print('ERROR: new points do not have as many features as original data')
    else:
        print('ERROR: need to fit model first before you can embed new points')
    if iclustup.ndim > 1:
        iclustup = iclustup.T
    else:
        iclustup = iclustup.flatten()
    return iclustup
|
if already fit, can add new points and see where they fall
|
def resize(self, dims):
    """Resize our drawing area to encompass a space defined by the
    given (width, height) dimensions.
    """
    width, height = dims[:2]
    self.dims = (width, height)
    self.logger.debug("renderer reconfigured to %dx%d" % (width, height))
    # A cv "surface" is simply a numpy image array shaped
    # (rows, columns, channels); the channel count follows rgb_order.
    channels = len(self.rgb_order)
    self.surface = np.zeros((height, width, channels), dtype=np.uint8)
|
Resize our drawing area to encompass a space defined by the
given dimensions.
|
def database_caller_creator(self, host, port, name=None):
    '''Create a StrictRedis client and a non-transactional pipeline.

    The pair is later used to modify the db; ``name`` selects the redis
    database number and falls back to 0 when falsy.
    '''
    db_index = name or 0
    client = redis.StrictRedis(host=host, port=port, db=db_index)
    pipeline = client.pipeline(transaction=False)
    return client, pipeline
|
creates a redis connection object
which will be later used to modify the db
|
def get_mac_address_table(self):
    """
    Returns a lists of dictionaries. Each dictionary represents an entry in the MAC Address
    Table, having the following keys
        * mac (string)
        * interface (string)
        * vlan (int)
        * active (boolean)
        * static (boolean)
        * moves (int)
        * last_move (float)
    Format1:
    Destination Address  Address Type  VLAN  Destination Port
    -------------------  ------------  ----  --------------------
    6400.f1cf.2cc6          Dynamic       1     Wlan-GigabitEthernet0
    Cat 6500:
    Legend: * - primary entry
            age - seconds since last seen
            n/a - not available
      vlan   mac address     type    learn     age              ports
    ------+----------------+--------+-----+----------+--------------------------
    *  999  1111.2222.3333   dynamic  Yes          0   Port-channel1
       999  1111.2222.3333   dynamic  Yes          0   Port-channel1
    Cat 4948
    Unicast Entries
     vlan   mac address     type        protocols               port
    -------+---------------+--------+---------------------+--------------------
     999    1111.2222.3333   dynamic ip                    Port-channel1
    Cat 2960
    Mac Address Table
    -------------------------------------------
    Vlan    Mac Address       Type        Ports
    ----    -----------       --------    -----
     All    1111.2222.3333    STATIC      CPU
    """
    # Per-platform row patterns; matched in order against each output line.
    RE_MACTABLE_DEFAULT = r"^" + MAC_REGEX
    RE_MACTABLE_6500_1 = r"^\*\s+{}\s+{}\s+".format(
        VLAN_REGEX, MAC_REGEX
    )  # 7 fields
    RE_MACTABLE_6500_2 = r"^{}\s+{}\s+".format(VLAN_REGEX, MAC_REGEX)  # 6 fields
    RE_MACTABLE_6500_3 = r"^\s{51}\S+"  # Fill down prior
    # NOTE(review): only one placeholder below, but two format arguments —
    # MAC_REGEX is silently ignored by .format(); confirm intended pattern.
    RE_MACTABLE_6500_4 = r"^R\s+{}\s+.*Router".format(
        VLAN_REGEX, MAC_REGEX
    )  # Router field
    RE_MACTABLE_6500_5 = r"^R\s+N/A\s+{}.*Router".format(
        MAC_REGEX
    )  # Router skipped
    RE_MACTABLE_4500_1 = r"^{}\s+{}\s+".format(VLAN_REGEX, MAC_REGEX)  # 5 fields
    RE_MACTABLE_4500_2 = r"^\s{32,34}\S+"  # Fill down prior
    RE_MACTABLE_4500_3 = r"^{}\s+{}\s+".format(
        INT_REGEX, MAC_REGEX
    )  # Matches PHY int
    RE_MACTABLE_2960_1 = r"^All\s+{}".format(MAC_REGEX)
    RE_MACTABLE_GEN_1 = r"^{}\s+{}\s+".format(
        VLAN_REGEX, MAC_REGEX
    )  # 4 fields-2960/4500
    def process_mac_fields(vlan, mac, mac_type, interface):
        """Return proper data for mac address fields."""
        # Self/static/system entries count as static; internal destinations
        # (CPU / Router / Switch) are normalized to an empty interface name.
        if mac_type.lower() in ["self", "static", "system"]:
            static = True
            if vlan.lower() == "all":
                vlan = 0
            if (
                interface.lower() == "cpu"
                or re.search(r"router", interface.lower())
                or re.search(r"switch", interface.lower())
            ):
                interface = ""
        else:
            static = False
        # moves/last_move are not reported by IOS; use sentinel values.
        return {
            "mac": napalm.base.helpers.mac(mac),
            "interface": self._canonical_int(interface),
            "vlan": int(vlan),
            "static": static,
            "active": True,
            "moves": -1,
            "last_move": -1.0,
        }
    mac_address_table = []
    command = IOS_COMMANDS["show_mac_address"]
    output = self._send_command(command)
    # Skip the header lines
    output = re.split(r"^----.*", output, flags=re.M)[1:]
    output = "\n".join(output).strip()
    # Strip any leading asterisks
    output = re.sub(r"^\*", "", output, flags=re.M)
    # "Fill down" state: wrapped continuation lines only carry the
    # interface, so remember the previous row's vlan/mac/type.
    fill_down_vlan = fill_down_mac = fill_down_mac_type = ""
    for line in output.splitlines():
        # Cat6500 one off and 4500 multicast format
        if re.search(RE_MACTABLE_6500_3, line) or re.search(
            RE_MACTABLE_4500_2, line
        ):
            interface = line.strip()
            if "," in interface:
                interfaces = interface.split(",")
            else:
                interfaces = [interface]
            for single_interface in interfaces:
                mac_address_table.append(
                    process_mac_fields(
                        fill_down_vlan,
                        fill_down_mac,
                        fill_down_mac_type,
                        single_interface,
                    )
                )
            continue
        line = line.strip()
        if line == "":
            continue
        if re.search(r"^---", line):
            # Convert any '---' to VLAN 0
            line = re.sub(r"^---", "0", line, flags=re.M)
        # Format1
        if re.search(RE_MACTABLE_DEFAULT, line):
            if len(line.split()) == 4:
                mac, mac_type, vlan, interface = line.split()
                mac_address_table.append(
                    process_mac_fields(vlan, mac, mac_type, interface)
                )
            else:
                raise ValueError("Unexpected output from: {}".format(line.split()))
        # Cat6500 format
        elif (
            re.search(RE_MACTABLE_6500_1, line)
            or re.search(RE_MACTABLE_6500_2, line)
        ) and len(line.split()) >= 6:
            if len(line.split()) == 7:
                _, vlan, mac, mac_type, _, _, interface = line.split()
            elif len(line.split()) == 6:
                vlan, mac, mac_type, _, _, interface = line.split()
            if "," in interface:
                # Multi-interface row; remember fields for continuation lines.
                interfaces = interface.split(",")
                fill_down_vlan = vlan
                fill_down_mac = mac
                fill_down_mac_type = mac_type
                for single_interface in interfaces:
                    mac_address_table.append(
                        process_mac_fields(vlan, mac, mac_type, single_interface)
                    )
            else:
                mac_address_table.append(
                    process_mac_fields(vlan, mac, mac_type, interface)
                )
        # Cat4500 format
        elif re.search(RE_MACTABLE_4500_1, line) and len(line.split()) == 5:
            vlan, mac, mac_type, _, interface = line.split()
            mac_address_table.append(
                process_mac_fields(vlan, mac, mac_type, interface)
            )
        # Cat4500 w/PHY interface in Mac Table. Vlan will be -1.
        elif re.search(RE_MACTABLE_4500_3, line) and len(line.split()) == 5:
            interface, mac, mac_type, _, _ = line.split()
            interface = canonical_interface_name(interface)
            vlan = "-1"
            mac_address_table.append(
                process_mac_fields(vlan, mac, mac_type, interface)
            )
        # Cat2960 format - ignore extra header line
        elif re.search(r"^Vlan\s+Mac Address\s+", line):
            continue
        # Cat2960 format (Cat4500 format multicast entries)
        elif (
            re.search(RE_MACTABLE_2960_1, line)
            or re.search(RE_MACTABLE_GEN_1, line)
        ) and len(line.split()) == 4:
            vlan, mac, mac_type, interface = line.split()
            if "," in interface:
                interfaces = interface.split(",")
                fill_down_vlan = vlan
                fill_down_mac = mac
                fill_down_mac_type = mac_type
                for single_interface in interfaces:
                    mac_address_table.append(
                        process_mac_fields(vlan, mac, mac_type, single_interface)
                    )
            else:
                mac_address_table.append(
                    process_mac_fields(vlan, mac, mac_type, interface)
                )
        # 4500 in case of unused Vlan 1.
        elif re.search(RE_MACTABLE_4500_1, line) and len(line.split()) == 3:
            vlan, mac, mac_type = line.split()
            mac_address_table.append(
                process_mac_fields(vlan, mac, mac_type, interface="")
            )
        # 4500 w/PHY interface in Multicast table. Vlan will be -1.
        elif re.search(RE_MACTABLE_4500_3, line) and len(line.split()) == 4:
            vlan, mac, mac_type, interface = line.split()
            vlan = "-1"
            mac_address_table.append(
                process_mac_fields(vlan, mac, mac_type, interface)
            )
        elif re.search(RE_MACTABLE_6500_4, line) and len(line.split()) == 7:
            line = re.sub(r"^R\s+", "", line)
            vlan, mac, mac_type, _, _, interface = line.split()
            mac_address_table.append(
                process_mac_fields(vlan, mac, mac_type, interface)
            )
            continue
        elif re.search(RE_MACTABLE_6500_5, line):
            line = re.sub(r"^R\s+", "", line)
            vlan, mac, mac_type, _, _, interface = line.split()
            # Convert 'N/A' VLAN to to 0
            vlan = re.sub(r"N/A", "0", vlan)
            mac_address_table.append(
                process_mac_fields(vlan, mac, mac_type, interface)
            )
            continue
        elif re.search(r"Total Mac Addresses", line):
            continue
        elif re.search(r"Multicast Entries", line):
            continue
        elif re.search(r"vlan.*mac.*address.*type.*", line):
            continue
        elif re.search(
            r"Displaying entries from active supervisor:\s+\w+\s+\[\d\]:", line
        ):
            continue
        else:
            # Fail loudly on unrecognized rows rather than silently dropping.
            raise ValueError("Unexpected output from: {}".format(repr(line)))
    return mac_address_table
|
Returns a lists of dictionaries. Each dictionary represents an entry in the MAC Address
Table, having the following keys
* mac (string)
* interface (string)
* vlan (int)
* active (boolean)
* static (boolean)
* moves (int)
* last_move (float)
Format1:
Destination Address Address Type VLAN Destination Port
------------------- ------------ ---- --------------------
6400.f1cf.2cc6 Dynamic 1 Wlan-GigabitEthernet0
Cat 6500:
Legend: * - primary entry
age - seconds since last seen
n/a - not available
vlan mac address type learn age ports
------+----------------+--------+-----+----------+--------------------------
* 999 1111.2222.3333 dynamic Yes 0 Port-channel1
999 1111.2222.3333 dynamic Yes 0 Port-channel1
Cat 4948
Unicast Entries
vlan mac address type protocols port
-------+---------------+--------+---------------------+--------------------
999 1111.2222.3333 dynamic ip Port-channel1
Cat 2960
Mac Address Table
-------------------------------------------
Vlan Mac Address Type Ports
---- ----------- -------- -----
All 1111.2222.3333 STATIC CPU
|
def image_member(self):
    """
    Returns a json-schema document that represents an image member entity.
    (a container of member entities).
    """
    uri = "/%s/member" % self.uri_base
    _resp, resp_body = self.api.method_get(uri)
    return resp_body
|
Returns a json-schema document that represents an image member entity.
(a container of member entities).
|
def _get_route_args(self, namespace, route, tag=False): # pylint: disable=unused-argument
    """Returns a list of name / value string pairs representing the arguments for
    a particular route.

    :param namespace: unused; kept for interface compatibility.
    :param route: the route whose argument data type is inspected.
    :param tag: forwarded to fmt_type when formatting each type.
    :return: (arg_list, doc_list) — formatted (name, type) pairs and
        (name, processed doc) pairs for the route's arguments.
    """
    data_type, _ = unwrap_nullable(route.arg_data_type)
    if is_struct_type(data_type):
        # Structs expand to one argument per field.
        arg_list = []
        for field in data_type.all_fields:
            arg_list.append((fmt_var(field.name), fmt_type(
                field.data_type, tag=tag, has_default=field.has_default)))
        doc_list = [(fmt_var(f.name), self.process_doc(f.doc, self._docf))
                    for f in data_type.fields if f.doc]
    elif is_union_type(data_type):
        # Unions are passed as a single argument of the union type.
        arg_list = [(fmt_var(data_type.name), fmt_type(
            route.arg_data_type, tag=tag))]
        doc_list = [(fmt_var(data_type.name),
                     self.process_doc(data_type.doc,
                                      self._docf) if data_type.doc
                     else 'The {} union'.format(
                         fmt_class(data_type
                                   .name)))]
    else:
        # Other data types carry no named arguments.
        arg_list = []
        doc_list = []
    return arg_list, doc_list
|
Returns a list of name / value string pairs representing the arguments for
a particular route.
|
def _parse_action(action):
"""
Parses a single action item, for instance one of the following:
m; m(); m(True); m(*)
The brackets must match.
"""
i_open = action.find('(')
if i_open is -1:
# return action name, finished
return {'name': action, 'args': [], 'event_args': False}
# we need to parse the arguments
i_close = action.rfind(')')
if i_close is -1:
raise Exception('Bracket in argument opened but not closed.')
action_name = action[:i_open]
arglist = action[i_open+1:i_close].strip()
if not arglist:
# no arglist, just return method name
return {'name': action_name, 'args': [], 'event_args': False}
if '*' in arglist:
return {'name': action_name, 'args': [], 'event_args': True}
return {'name': action_name, 'args': _parse_arg_list(arglist), 'event_args': False}
|
Parses a single action item, for instance one of the following:
m; m(); m(True); m(*)
The brackets must match.
|
def findattr(self, name, resolved=True):
    """
    Find an attribute type definition.
    @param name: An attribute name.
    @type name: basestring
    @param resolved: A flag indicating that the fully resolved type should
        be returned.
    @type resolved: boolean
    @return: The found schema I{type}
    @rtype: L{xsd.sxbase.SchemaObject}
    """
    # Attribute names are namespaced with a leading '@' in the schema index.
    name = '@%s' % name
    parent = self.top().resolved
    if parent is None:
        # NOTE(review): 'node' is not defined anywhere in this scope, so
        # this branch raises NameError when parent is None. Presumably a
        # schema node should be queried here — confirm against callers
        # before relying on this path.
        result, ancestry = self.query(name, node)
    else:
        result, ancestry = self.getchild(name, parent)
    if result is None:
        return result
    if resolved:
        result = result.resolve()
    return result
|
Find an attribute type definition.
@param name: An attribute name.
@type name: basestring
@param resolved: A flag indicating that the fully resolved type should
be returned.
@type resolved: boolean
@return: The found schema I{type}
@rtype: L{xsd.sxbase.SchemaObject}
|
def _preprocess_Y(self, Y, k):
"""Convert Y to prob labels if necessary"""
Y = Y.clone()
# If preds, convert to probs
if Y.dim() == 1 or Y.shape[1] == 1:
Y = pred_to_prob(Y.long(), k=k)
return Y
|
Convert Y to prob labels if necessary
|
def get_totals_by_payee(self, account, start_date=None, end_date=None):
    """
    Returns transaction totals grouped by Payee.

    NOTE(review): ``start_date`` and ``end_date`` are accepted but never
    applied to the queryset, so date filtering is currently a no-op —
    confirm intended behavior and add the corresponding filter if needed.
    """
    # Only top-level transactions (parent is null) are aggregated.
    qs = Transaction.objects.filter(account=account, parent__isnull=True)
    qs = qs.values('payee').annotate(models.Sum('value_gross'))
    qs = qs.order_by('payee__name')
    return qs
|
Returns transaction totals grouped by Payee.
|
def get_response_object(self, service_id, version_number, name):
    """Gets the specified Response Object."""
    url = "/service/%s/version/%d/response_object/%s" % (
        service_id, version_number, name)
    content = self._fetch(url)
    return FastlyResponseObject(self, content)
|
Gets the specified Response Object.
|
def tv_to_rdf(infile_name, outfile_name):
    """
    Convert a SPDX file from tag/value format to RDF format.
    Return True on success, False otherwise.
    """
    parser = Parser(Builder(), StandardLogger())
    parser.build()
    with open(infile_name) as infile:
        data = infile.read()
    document, error = parser.parse(data)
    if not error:
        with open(outfile_name, mode='w') as outfile:
            write_document(document, outfile)
        return True
    # Parsing failed: report the validation messages to help diagnose the
    # input. (The input here is a tag/value file, not an RDF file.)
    print('Errors encountered while parsing tag/value file.')
    messages = []
    document.validate(messages)
    print('\n'.join(messages))
    return False
|
Convert a SPDX file from tag/value format to RDF format.
Return True on success, False otherwise.
|
def from_mongo(cls, doc):
    """Convert data coming in from the MongoDB wire driver into a Document instance."""
    # Pass None straight through to support simplified iterative use.
    if doc is None:
        return None
    # Existing Document instances need no processing.
    if isinstance(doc, Document):
        return doc
    # Honour a stored type hint by loading the concrete class it names.
    if cls.__type_store__ and cls.__type_store__ in doc:
        cls = load(doc[cls.__type_store__], 'marrow.mongo.document')
    # Bind the raw document directly so mutations are reflected both ways;
    # default-value processing is deferred until after binding.
    instance = cls(_prepare_defaults=False)
    instance.__data__ = doc
    instance._prepare_defaults()  # pylint:disable=protected-access
    return instance
|
Convert data coming in from the MongoDB wire driver into a Document instance.
|
def reload_configuration(self, event):
    """Event triggered configuration reload.

    Ignores events that target other components.
    """
    if event.target != self.uniquename:
        return
    self.log('Reloading configuration')
    self._read_config()
|
Event triggered configuration reload
|
def setValue(self, newText):
    """Sets a text value (string) into the text field.

    Re-renders the (possibly multi-line) text into ``self.textImage`` and
    updates ``self.rect``; non-string values are converted with ``str()``
    and an unchanged value is a no-op.
    """
    newText = str(newText) # attempt to convert to string (might be int or float ...)
    if self.text == newText:
        return # nothing to change
    self.text = newText # save the new text
    textLines = self.text.splitlines()
    nLines = len(textLines)
    surfacesList = [] # build up a list of surfaces, one for each line of original text
    actualWidth = 0 # will eventually be set the width of longest line
    # Render each line separately; track the widest line.
    for line in textLines:
        lineSurface = self.font.render(line, True, self.textColor)
        surfacesList.append(lineSurface)
        thisRect = lineSurface.get_rect()
        if thisRect.width > actualWidth:
            actualWidth = thisRect.width
    heightOfOneLine = self.fontHeight
    actualHeight = nLines * heightOfOneLine
    self.rect = pygame.Rect(self.loc[0], self.loc[1], actualWidth, actualHeight)
    # Create one larger surface, then blit all line surfaces into it
    # Special flags are needed to set the background alpha as transparent
    self.textImage = pygame.Surface((actualWidth, actualHeight), flags=SRCALPHA)
    if self.backgroundColor is not None:
        self.textImage.fill(self.backgroundColor)
    thisLineTop = 0
    # Stack line surfaces top-to-bottom, honouring the justification mode.
    for lineSurface in surfacesList:
        if self.justified == 'left':
            self.textImage.blit(lineSurface, (0, thisLineTop))
        else:
            thisSurfaceWidth = lineSurface.get_rect()[2] # element 2 is the width
            if self.justified == 'center':
                # NOTE(review): '/' yields a float position; pygame blit
                # truncates — presumably acceptable, confirm if pixel-exact
                # centering matters.
                theLeft = (actualWidth - thisSurfaceWidth) / 2
            elif self.justified == 'right': # right justified
                theLeft = actualWidth - thisSurfaceWidth
            else:
                raise Exception('Value of justified was: ' + self.justified + '. Must be left, center, or right')
            self.textImage.blit(lineSurface, (theLeft, thisLineTop))
        thisLineTop = thisLineTop + heightOfOneLine
    if self.useSpecifiedArea:
        # Fit the text image into a user specified area, may truncate the text off left, right, or bottom
        textRect = self.textImage.get_rect()
        if self.userWidth is None:
            theWidth = textRect.width
        else:
            theWidth = self.userWidth
        if self.userHeight is None:
            theHeight = textRect.height
        else:
            theHeight = self.userHeight
        # Create a surface that is the size that the user asked for
        userSizedImage = pygame.Surface((theWidth, theHeight), flags=SRCALPHA)
        self.rect = pygame.Rect(self.loc[0], self.loc[1], theWidth, theHeight)
        if self.backgroundColor is not None:
            userSizedImage.fill(self.backgroundColor)
        # Figure out the appropriate left edge within the userSizedImage
        if self.justified == 'left':
            theLeft = 0
        elif self.justified == 'center':
            theLeft = (theWidth - textRect.width) / 2
        else: # right justified
            theLeft = theWidth - textRect.width
        # Copy the appropriate part from the text image into the user sized image
        # Then re-name it to the textImage so it can be drawn later
        userSizedImage.blit(self.textImage, (theLeft, 0))
        self.textImage = userSizedImage
    self.textImage = pygame.Surface.convert_alpha(self.textImage)
|
Sets a text value (string) into the text field.
|
def select(self, key=None, val=None, touch=None, log='any', out=int):
    """ Return the indices of the rays matching selection criteria
    The criterion can be of two types:
        - a key found in self.dchans, with a matching value
        - a touch tuple (indicating which element in self.config is touched
            by the desired rays)
    Parameters
    ----------
    key :    None / str
        A key to be found in self.dchans
    val :    int / str / float / list of such
        The value to be matched
        If a list of values is provided, the behaviour depends on log
    log :    str
        A flag indicating which behaviour to use when val is a list
            - any : Returns indices of rays matching any value in val
            - all : Returns indices of rays matching all values in val
            - not : Returns indices of rays matching None of the val
    touch:   None / str / int / tuple
        Used if key is None
        Tuple that can be of len()=1, 2 or 3
        Tuple indicating you want the rays that are touching some specific elements of self.config:
            - touch[0] : str / int or list of such
                str : a 'Cls_Name' string indicating the element
                int : the index of the element in self.lStruct_computeInOut
            - touch[1] : int / list of int
                Indices of the desired segments on the polygon
                (i.e.: of the cross-section polygon of the above element)
            - touch[2] : int / list of int
                Indices, if relevant, of the toroidal / linear unit
                Only relevant when the element has noccur>1
        In this case only log='not' has an effect
    out :    str
        Flag indicating whether to return:
            - bool : a (nRays,) boolean array of indices
            - int :  a (N,) array of int indices (N=number of matching rays)
    Returns
    -------
    ind :    np.ndarray
        The array of matching rays
    """
    assert out in [int,bool]
    assert log in ['any','all','not']
    C = [key is None,touch is None]
    assert np.sum(C)>=1
    if np.sum(C)==2:
        # No criterion at all: every ray matches.
        ind = np.ones((self.nRays,),dtype=bool)
    else:
        if key is not None:
            assert type(key) is str and key in self._dchans.keys()
            ltypes = [str,int,float,np.int64,np.float64]
            C0 = type(val) in ltypes
            C1 = type(val) in [list,tuple,np.ndarray]
            assert C0 or C1
            if C0:
                val = [val]
            else:
                assert all([type(vv) in ltypes for vv in val])
            # One boolean row per candidate value, combined per `log`.
            ind = np.vstack([self._dchans[key]==ii for ii in val])
            if log=='any':
                ind = np.any(ind,axis=0)
            elif log=='all':
                ind = np.all(ind,axis=0)
            else:
                ind = ~np.any(ind,axis=0)
        elif touch is not None:
            lint = [int,np.int64]
            larr = [list,tuple,np.ndarray]
            touch = [touch] if not type(touch) is list else touch
            assert len(touch) in [1,2,3]
            def _check_touch(tt):
                # Classify a touch component: 'Cls_Name' string, single int,
                # or array of ints.
                cS = type(tt) is str and len(tt.split('_'))==2
                c0 = type(tt) in lint
                c1 = type(tt) in larr and len(tt)>=0
                c1 = c1 and all([type(t) in lint for t in tt])
                return cS, c0, c1
            # Pad to exactly 3 components with empty (wildcard) lists.
            for ii in range(0,3-len(touch)):
                touch.append([])
            ntouch = len(touch)
            assert ntouch == 3
            for ii in range(0,ntouch):
                cS, c0, c1 = _check_touch(touch[ii])
                if not (cS or c0 or c1):
                    # Fixed: the original applied "..." % touch on a string
                    # with no placeholder, raising TypeError instead of the
                    # intended informative Exception.
                    msg = "Provided touch is not valid:\n"
                    msg += "    - Provided: %s\n"%str(touch)
                    msg += "Please provide either:\n"
                    msg += "    - str in the form 'Cls_Name'\n"
                    msg += "    - int (index)\n"
                    msg += "    - array of int indices"
                    raise Exception(msg)
                if cS:
                    # Translate 'Cls_Name' to the structure's index.
                    lS = self.lStruct_computeInOut
                    k0, k1 = touch[ii].split('_')
                    ind = [jj for jj in range(0,len(lS))
                           if lS[jj].Id.Cls==k0 and lS[jj].Id.Name==k1]
                    assert len(ind)==1
                    touch[ii] = [ind[0]]
                elif c0:
                    touch[ii] = [touch[ii]]
            # Common part
            ind = np.zeros((ntouch,self.nRays),dtype=bool)
            for i in range(0,ntouch):
                if len(touch[i])==0:
                    # Empty component acts as a wildcard.
                    ind[i,:] = True
                else:
                    for n in range(0,len(touch[i])):
                        ind[i,:] = np.logical_or(ind[i,:],
                                                 self._dgeom['indout'][i,:]==touch[i][n])
            ind = np.all(ind,axis=0)
            if log=='not':
                ind[:] = ~ind
    if out is int:
        ind = ind.nonzero()[0]
    return ind
|
Return the indices of the rays matching selection criteria
The criterion can be of two types:
- a key found in self.dchans, with a matching value
- a touch tuple (indicating which element in self.config is touched
by the desired rays)
Parameters
----------
key : None / str
A key to be found in self.dchans
val : int / str / float / list of such
The value to be matched
If a list of values is provided, the behaviour depends on log
log : str
A flag indicating which behaviour to use when val is a list
- any : Returns indices of rays matching any value in val
- all : Returns indices of rays matching all values in val
- not : Returns indices of rays matching None of the val
touch: None / str / int / tuple
Used if key is None
Tuple that can be of len()=1, 2 or 3
Tuple indicating you want the rays that are touching some specific elements of self.config:
- touch[0] : str / int or list of such
str : a 'Cls_Name' string indicating the element
int : the index of the element in self.lStruct_computeInOut
- touch[1] : int / list of int
Indices of the desired segments on the polygon
(i.e.: of the cross-section polygon of the above element)
- touch[2] : int / list of int
Indices, if relevant, of the toroidal / linear unit
Only relevant when the element has noccur>1
In this case only log='not' has an effect
out : str
Flag indicating whether to return:
- bool : a (nRays,) boolean array of indices
- int : a (N,) array of int indices (N=number of matching rays)
Returns
-------
ind : np.ndarray
The array of matching rays
|
def read_detections(fname):
    """
    Read detections from a file to a list of Detection objects.
    :type fname: str
    :param fname: File to read from, must be a file written to by \
        Detection.write.
    :returns: list of :class:`eqcorrscan.core.match_filter.Detection`
    :rtype: list
    .. note::
        :class:`eqcorrscan.core.match_filter.Detection`'s returned do not
        contain Detection.event
    """
    detections = []
    # 'with' guarantees the handle is closed even if a parse error raises
    # part-way through (the previous version leaked the file on error).
    with open(fname, 'r') as f:
        for index, line in enumerate(f):
            if index == 0:
                continue  # Skip header
            if line.rstrip().split('; ')[0] == 'Template name':
                continue  # Skip any repeated headers
            detection = line.rstrip().split('; ')
            detection[1] = UTCDateTime(detection[1])
            detection[2] = int(float(detection[2]))
            detection[3] = ast.literal_eval(detection[3])
            detection[4] = float(detection[4])
            detection[5] = float(detection[5])
            if len(detection) < 9:
                # Older file format without the trailing two columns.
                detection.extend(['Unset', float('NaN')])
            else:
                detection[7] = float(detection[7])
            detections.append(Detection(
                template_name=detection[0], detect_time=detection[1],
                no_chans=detection[2], detect_val=detection[4],
                threshold=detection[5], threshold_type=detection[6],
                threshold_input=detection[7], typeofdet=detection[8],
                chans=detection[3]))
    return detections
|
Read detections from a file to a list of Detection objects.
:type fname: str
:param fname: File to read from, must be a file written to by \
Detection.write.
:returns: list of :class:`eqcorrscan.core.match_filter.Detection`
:rtype: list
.. note::
:class:`eqcorrscan.core.match_filter.Detection`'s returned do not
contain Detection.event
|
def set_role(username, role):
    '''
    Assign role to username
    .. code-block:: bash
        salt '*' onyx.cmd set_role username=daniel role=vdc-admin
    '''
    try:
        sendline('config terminal')
        role_line = 'username {0} role {1}'.format(username, role)
        ret = sendline(role_line)
        sendline('end')
        # Persist the change across reboots.
        sendline('copy running-config startup-config')
        return '\n'.join([role_line, ret])
    except TerminalException as e:
        log.error(e)
        # Fixed: the previous message said 'Failed to set password' even
        # though this function sets a role.
        return 'Failed to set role'
|
Assign role to username
.. code-block:: bash
salt '*' onyx.cmd set_role username=daniel role=vdc-admin
|
def _sanity_check_block_pairwise_constraints(ir_blocks):
    """Assert that adjacent blocks obey all invariants."""
    for previous_block, current_block in pairwise(ir_blocks):
        previous_is_mark = isinstance(previous_block, MarkLocation)

        # Filter must come before MarkLocation, never after it.
        if previous_is_mark and isinstance(current_block, Filter):
            raise AssertionError(u'Found Filter after MarkLocation block: {}'.format(ir_blocks))

        # Marking the same location twice in a row is pointless.
        if previous_is_mark and isinstance(current_block, MarkLocation):
            raise AssertionError(u'Found consecutive MarkLocation blocks: {}'.format(ir_blocks))

        # An optional Traverse must be immediately followed by a
        # MarkLocation, CoerceType or Filter block.
        if isinstance(previous_block, Traverse) and previous_block.optional:
            if not isinstance(current_block, (MarkLocation, CoerceType, Filter)):
                raise AssertionError(u'Expected MarkLocation, CoerceType or Filter after Traverse '
                                     u'with optional=True. Found: {}'.format(ir_blocks))

        # An optional Backtrack must be immediately followed by a MarkLocation.
        if isinstance(previous_block, Backtrack) and previous_block.optional:
            if not isinstance(current_block, MarkLocation):
                raise AssertionError(u'Expected MarkLocation after Backtrack with optional=True, '
                                     u'but none was found: {}'.format(ir_blocks))

        # Recurse must be immediately preceded by MarkLocation or Backtrack.
        if isinstance(current_block, Recurse):
            if not isinstance(previous_block, (MarkLocation, Backtrack)):
                raise AssertionError(u'Expected MarkLocation or Backtrack before Recurse, but none '
                                     u'was found: {}'.format(ir_blocks))
|
Assert that adjacent blocks obey all invariants.
|
def _select_next_server(self):
    """
    Looks up in the server pool for an available server
    and attempts to connect.

    Rotates through ``self._server_pool`` round-robin, honouring the
    ``max_reconnect_attempts`` and ``reconnect_time_wait`` options, and
    on success stores the reader/writer streams on the instance.
    Raises ErrNoServers when the pool is exhausted.
    """
    while True:
        if len(self._server_pool) == 0:
            self._current_server = None
            raise ErrNoServers
        # monotonic clock: immune to wall-clock adjustments.
        now = time.monotonic()
        s = self._server_pool.pop(0)
        if self.options["max_reconnect_attempts"] > 0:
            if s.reconnects > self.options["max_reconnect_attempts"]:
                # Discard server since already tried to reconnect too many times
                continue
        # Not yet exceeded max_reconnect_attempts so can still use
        # this server in the future.
        self._server_pool.append(s)
        if s.last_attempt is not None and now < s.last_attempt + self.options["reconnect_time_wait"]:
            # Backoff connecting to server if we attempted recently.
            yield from asyncio.sleep(self.options["reconnect_time_wait"], loop=self._loop)
        try:
            s.last_attempt = time.monotonic()
            r, w = yield from asyncio.open_connection(
                s.uri.hostname,
                s.uri.port,
                loop=self._loop,
                limit=DEFAULT_BUFFER_SIZE)
            self._current_server = s
            # We keep a reference to the initial transport we used when
            # establishing the connection in case we later upgrade to TLS
            # after getting the first INFO message. This is in order to
            # prevent the GC closing the socket after we send CONNECT
            # and replace the transport.
            #
            # See https://github.com/nats-io/asyncio-nats/issues/43
            self._bare_io_reader = self._io_reader = r
            self._bare_io_writer = self._io_writer = w
            break
        except Exception as e:
            # Record the failure and move on to the next server in the pool.
            s.last_attempt = time.monotonic()
            s.reconnects += 1
            self._err = e
            if self._error_cb is not None:
                yield from self._error_cb(e)
            continue
|
Looks up in the server pool for an available server
and attempts to connect.
|
def getLVstats(self, *args):
    """Returns I/O stats for LV.
    @param args: Two calling conventions are implemented:
        - Passing two parameters vg and lv.
        - Passing only one parameter in 'vg-lv' format.
    @return: Dict of stats.
    """
    if len(args) not in (1, 2):
        raise TypeError("The getLVstats must be called with either "
                        "one or two arguments.")
    if self._vgTree is None:
        self._initDMinfo()
    # Pick the lookup table matching the calling convention.
    if len(args) == 1:
        dmdev = self._mapLVname2dm.get(args[0])
    else:
        dmdev = self._mapLVtuple2dm.get(args)
    if dmdev is None:
        return None
    return self.getDevStats(dmdev)
|
Returns I/O stats for LV.
@param args: Two calling conventions are implemented:
- Passing two parameters vg and lv.
- Passing only one parameter in 'vg-lv' format.
@return: Dict of stats.
|
def add_to_env(self, content):
    """
    Append a line of content to the env script.

    Raises DirectoryException when the directory was not initialized with
    rewrite_config; lazily opens the env file handle on first use.
    """
    if not self.rewrite_config:
        raise DirectoryException("Error! Directory was not intialized w/ rewrite_config.")
    if not self.env_file:
        self.env_path, self.env_file = self.__get_env_handle(self.root_dir)
    self.env_file.write(content + '\n')
|
add content to the env script.
|
def CreateTask(self, session_identifier):
    """Creates a task.
    Args:
      session_identifier (str): the identifier of the session the task is
          part of.
    Returns:
      Task: task attribute container.
    """
    new_task = tasks.Task(session_identifier)
    logger.debug('Created task: {0:s}.'.format(new_task.identifier))
    # Register the task under the lock so bookkeeping stays consistent.
    with self._lock:
        self._tasks_queued[new_task.identifier] = new_task
        self._total_number_of_tasks += 1
        self.SampleTaskStatus(new_task, 'created')
    return new_task
|
Creates a task.
Args:
session_identifier (str): the identifier of the session the task is
part of.
Returns:
Task: task attribute container.
|
def coge(args):
    """
    %prog coge cogefile
    Convert CoGe file to anchors file.
    """
    p = OptionParser(coge.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    cogefile, = args
    fp = must_open(cogefile)
    cogefile = cogefile.replace(".gz", "")
    fw_ks = must_open(cogefile + ".ks", "w")
    fw_ac = must_open(cogefile + ".anchors", "w")
    tag = "###"
    # The .ks file starts with a single block separator.
    print(tag, file=fw_ks)
    for header, lines in read_block(fp, tag):
        # Each synteny block in the .anchors file starts with a separator.
        print(tag, file=fw_ac)
        for row in lines:
            if row[0] == '#':
                continue
            (ks, ka, achr, a, astart, astop, bchr,
             b, bstart, bstop, ev, ss) = row.split()
            # Gene names are the 4th '||'-delimited field.
            a = a.split("||")[3]
            b = b.split("||")[3]
            print("\t".join((a, b, ev)), file=fw_ac)
            print(",".join((";".join((a, b)), ks, ka, ks, ka)), file=fw_ks)
    fw_ks.close()
    fw_ac.close()
|
%prog coge cogefile
Convert CoGe file to anchors file.
|
def on_step_end(self, **kwargs):
    "Put the LR back to its value if necessary."
    if self.learn.gan_trainer.gen_mode:
        return
    # Undo the multiplier applied while training the critic.
    self.learn.opt.lr /= self.mult_lr
|
Put the LR back to its value if necessary.
|
def compute_xy(
    self, projection: Union[pyproj.Proj, crs.Projection, None] = None
):
    """Compute x and y columns from latitudes and longitudes.

    The source projection is WGS84 (EPSG 4326). When no destination
    projection is given, a Lambert Conformal Conical projection centered
    on the data inside the dataframe is used.

    For consistency reasons with pandas DataFrame, a new Traffic structure
    is returned.
    """
    if isinstance(projection, crs.Projection):
        projection = pyproj.Proj(projection.proj4_init)
    if projection is None:
        latitude = self.data.latitude
        projection = pyproj.Proj(
            proj="lcc",
            lat_1=latitude.min(),
            lat_2=latitude.max(),
            lat_0=latitude.mean(),
            lon_0=self.data.longitude.mean(),
        )
    # NOTE(review): pyproj.transform and init= strings are deprecated in
    # pyproj 2+; kept here for behavior parity with the original.
    x, y = pyproj.transform(
        pyproj.Proj(init="EPSG:4326"),
        projection,
        self.data.longitude.values,
        self.data.latitude.values,
    )
    return self.__class__(self.data.assign(x=x, y=y))
|
Computes x and y columns from latitudes and longitudes.
The source projection is WGS84 (EPSG 4326).
The default destination projection is a Lambert Conformal Conical
projection centered on the data inside the dataframe.
For consistency reasons with pandas DataFrame, a new Traffic structure
is returned.
|
def generate_documentation(schema):
    """
    Generates reStructuredText documentation from a Confirm file.

    :param schema: Dictionary representing the Confirm schema, mapping
        section names to dicts of option names to option descriptors.
    :returns: String representing the reStructuredText documentation.
    """
    documentation_title = "Configuration documentation"
    # Collect fragments and join once at the end instead of repeated
    # string concatenation (which is quadratic in the worst case).
    parts = [documentation_title + "\n", "=" * len(documentation_title) + "\n"]
    for section_name in schema:
        section_created = False
        for option_name in schema[section_name]:
            option = schema[section_name][option_name]
            if not section_created:
                # Emit the section heading once, before its first option.
                parts.append("\n")
                parts.append(section_name + "\n")
                parts.append("-" * len(section_name) + "\n")
                section_created = True
            parts.append("\n")
            parts.append(option_name + "\n")
            parts.append("~" * len(option_name) + "\n")
            if option.get('required'):
                parts.append("** This option is required! **\n")
            if option.get('type'):
                parts.append('*Type : %s.*\n' % option.get('type'))
            if option.get('description'):
                parts.append(option.get('description') + "\n")
            # Explicit None check so falsy defaults (0, False, "") are still
            # documented; the original truthiness test silently skipped them.
            if option.get('default') is not None:
                parts.append('The default value is %s.\n' % option.get('default'))
            if option.get('deprecated'):
                parts.append("** This option is deprecated! **\n")
    return "".join(parts)
|
Generates reStructuredText documentation from a Confirm file.
:param schema: Dictionary representing the Confirm schema.
:returns: String representing the reStructuredText documentation.
|
def add_ospf_area(self, ospf_area, ospf_interface_setting=None, network=None,
                  communication_mode='NOT_FORCED', unicast_ref=None):
    """
    Add OSPF Area to this routing node.

    Communication mode specifies how the interface will interact with the
    adjacent OSPF environment. Please see SMC API documentation for more
    in depth information on each option.

    If the interface has multiple networks nested below, all networks
    will receive the OSPF area by default unless the ``network`` parameter
    is specified. OSPF cannot be applied to IPv6 networks.

    Example of adding an area to interface routing node::

        area = OSPFArea('area0') #obtain area resource
        #Set on routing interface 0
        interface = engine.routing.get(0)
        interface.add_ospf_area(area)

    .. note:: If UNICAST is specified, you must also provide a unicast_ref
        of element type Host to identify the remote host. If no
        unicast_ref is provided, this is skipped

    :param OSPFArea ospf_area: OSPF area instance or href
    :param OSPFInterfaceSetting ospf_interface_setting: used to override the
        OSPF settings for this interface (optional)
    :param str network: if network specified, only add OSPF to this network
        on interface
    :param str communication_mode: NOT_FORCED|POINT_TO_POINT|PASSIVE|UNICAST
    :param Element unicast_ref: Element used as unicast gw (required for UNICAST)
    :raises ModificationAborted: Change must be made at the interface level
    :raises UpdateElementFailed: failure updating routing
    :raises ElementNotFound: ospf area not found
    :return: Status of whether the route table was updated
    :rtype: bool
    """
    communication_mode = communication_mode.upper()
    # The optional interface-setting override is the first destination,
    # followed by the unicast gateway when UNICAST mode is requested.
    destinations = [ospf_interface_setting] if ospf_interface_setting else []
    if communication_mode == 'UNICAST' and unicast_ref:
        destinations.append(unicast_ref)
    gateway = RoutingNodeGateway(
        ospf_area,
        communication_mode=communication_mode,
        destinations=destinations)
    return self._add_gateway_node('ospfv2_area', gateway, network)
|
Add OSPF Area to this routing node.
Communication mode specifies how the interface will interact with the
adjacent OSPF environment. Please see SMC API documentation for more
in depth information on each option.
If the interface has multiple networks nested below, all networks
will receive the OSPF area by default unless the ``network`` parameter
is specified. OSPF cannot be applied to IPv6 networks.
Example of adding an area to interface routing node::
area = OSPFArea('area0') #obtain area resource
#Set on routing interface 0
interface = engine.routing.get(0)
interface.add_ospf_area(area)
.. note:: If UNICAST is specified, you must also provide a unicast_ref
of element type Host to identify the remote host. If no
unicast_ref is provided, this is skipped
:param OSPFArea ospf_area: OSPF area instance or href
:param OSPFInterfaceSetting ospf_interface_setting: used to override the
OSPF settings for this interface (optional)
:param str network: if network specified, only add OSPF to this network
on interface
:param str communication_mode: NOT_FORCED|POINT_TO_POINT|PASSIVE|UNICAST
:param Element unicast_ref: Element used as unicast gw (required for UNICAST)
:raises ModificationAborted: Change must be made at the interface level
:raises UpdateElementFailed: failure updating routing
:raises ElementNotFound: ospf area not found
:return: Status of whether the route table was updated
:rtype: bool
|
def create_mosaic(tiles, nodata=0):
    """
    Create a mosaic from tiles. Tiles must be connected (also possible over Antimeridian),
    otherwise strange things can happen!
    Parameters
    ----------
    tiles : iterable
        an iterable containing tuples of a BufferedTile and an array
    nodata : integer or float
        raster nodata value to initialize the mosaic with (default: 0)
    Returns
    -------
    mosaic : ReferencedRaster
    """
    # Materialize generators: the tiles are iterated over more than once below.
    if isinstance(tiles, GeneratorType):
        tiles = list(tiles)
    elif not isinstance(tiles, list):
        raise TypeError("tiles must be either a list or generator")
    if not all([isinstance(pair, tuple) for pair in tiles]):
        raise TypeError("tiles items must be tuples")
    if not all([
        all([isinstance(tile, BufferedTile), isinstance(data, np.ndarray)])
        for tile, data in tiles
    ]):
        raise TypeError("tuples must be pairs of BufferedTile and array")
    if len(tiles) == 0:
        raise ValueError("tiles list is empty")
    logger.debug("create mosaic from %s tile(s)", len(tiles))
    # quick return if there is just one tile
    if len(tiles) == 1:
        tile, data = tiles[0]
        return ReferencedRaster(
            data=data,
            affine=tile.affine,
            bounds=tile.bounds,
            crs=tile.crs
        )
    # assert all tiles have same properties
    pyramid, resolution, dtype = _get_tiles_properties(tiles)
    # just handle antimeridian on global pyramid types
    shift = _shift_required(tiles)
    # determine mosaic shape and reference
    m_left, m_bottom, m_right, m_top = None, None, None, None
    for tile, data in tiles:
        # NOTE(review): num_bands keeps the value from the last tile seen;
        # this assumes all tiles share the same band count — confirm that
        # _get_tiles_properties() guarantees it.
        num_bands = data.shape[0] if data.ndim > 2 else 1
        left, bottom, right, top = tile.bounds
        if shift:
            # shift by half of the grid width
            left += pyramid.x_size / 2
            right += pyramid.x_size / 2
            # if tile is now shifted outside pyramid bounds, move within
            if right > pyramid.right:
                right -= pyramid.x_size
                left -= pyramid.x_size
        # Grow the merged bounds to include this tile.
        m_left = min([left, m_left]) if m_left is not None else left
        m_bottom = min([bottom, m_bottom]) if m_bottom is not None else bottom
        m_right = max([right, m_right]) if m_right is not None else right
        m_top = max([top, m_top]) if m_top is not None else top
    # Mosaic shape in pixels, derived from merged bounds and the common resolution.
    height = int(round((m_top - m_bottom) / resolution))
    width = int(round((m_right - m_left) / resolution))
    # initialize empty mosaic
    mosaic = ma.MaskedArray(
        data=np.full((num_bands, height, width), dtype=dtype, fill_value=nodata),
        mask=np.ones((num_bands, height, width))
    )
    # create Affine
    affine = Affine(resolution, 0, m_left, 0, -resolution, m_top)
    # fill mosaic array with tile data
    for tile, data in tiles:
        data = prepare_array(data, nodata=nodata, dtype=dtype)
        t_left, t_bottom, t_right, t_top = tile.bounds
        if shift:
            t_left += pyramid.x_size / 2
            t_right += pyramid.x_size / 2
            # if tile is now shifted outside pyramid bounds, move within
            if t_right > pyramid.right:
                t_right -= pyramid.x_size
                t_left -= pyramid.x_size
        # Map the tile bounds into row/column ranges of the mosaic array.
        minrow, maxrow, mincol, maxcol = bounds_to_ranges(
            out_bounds=(t_left, t_bottom, t_right, t_top),
            in_affine=affine,
            in_shape=(height, width)
        )
        mosaic[:, minrow:maxrow, mincol:maxcol] = data
        mosaic.mask[:, minrow:maxrow, mincol:maxcol] = data.mask
    if shift:
        # shift back output mosaic
        affine = Affine(resolution, 0, m_left - pyramid.x_size / 2, 0, -resolution, m_top)
    return ReferencedRaster(
        data=mosaic,
        affine=affine,
        bounds=Bounds(m_left, m_bottom, m_right, m_top),
        crs=tile.crs
    )
|
Create a mosaic from tiles. Tiles must be connected (also possible over Antimeridian),
otherwise strange things can happen!
Parameters
----------
tiles : iterable
an iterable containing tuples of a BufferedTile and an array
nodata : integer or float
raster nodata value to initialize the mosaic with (default: 0)
Returns
-------
mosaic : ReferencedRaster
|
def get_loc_level(self, key, level=0, drop_level=True):
    """
    Get both the location for the requested label(s) and the
    resulting sliced index.
    Parameters
    ----------
    key : label or sequence of labels
    level : int/level name or list thereof, optional
    drop_level : bool, default True
        if ``False``, the resulting index will not drop any level.
    Returns
    -------
    loc : A 2-tuple where the elements are:
          Element 0: int, slice object or boolean array
          Element 1: The resulting sliced multiindex/index. If the key
          contains all levels, this will be ``None``.
    Examples
    --------
    >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')],
    ...                                names=['A', 'B'])
    >>> mi.get_loc_level('b')
    (slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B'))
    >>> mi.get_loc_level('e', level='B')
    (array([False,  True, False], dtype=bool),
    Index(['b'], dtype='object', name='A'))
    >>> mi.get_loc_level(['b', 'e'])
    (1, None)
    See Also
    ---------
    MultiIndex.get_loc : Get location for a label or a tuple of labels.
    MultiIndex.get_locs : Get location for a label/slice/list/mask or a
                          sequence of such.
    """
    # Helper: slice self with `indexer`, then drop the requested levels from
    # the result (unless drop_level is False or dropping would fail).
    def maybe_droplevels(indexer, levels, drop_level):
        if not drop_level:
            return self[indexer]
        # kludgearound
        orig_index = new_index = self[indexer]
        levels = [self._get_level_number(i) for i in levels]
        # Drop from the highest level number down so earlier drops do not
        # shift the positions of the remaining levels.
        for i in sorted(levels, reverse=True):
            try:
                new_index = new_index.droplevel(i)
            except ValueError:
                # no dropping here
                return orig_index
        return new_index
    # Case 1: multiple levels requested — resolve each (key, level) pair
    # independently and intersect the resulting masks.
    if isinstance(level, (tuple, list)):
        if len(key) != len(level):
            raise AssertionError('Key for location must have same '
                                 'length as number of levels')
        result = None
        for lev, k in zip(level, key):
            loc, new_index = self.get_loc_level(k, level=lev)
            if isinstance(loc, slice):
                # Normalize slices to boolean masks so they can be combined
                # with `&` below.
                mask = np.zeros(len(self), dtype=bool)
                mask[loc] = True
                loc = mask
            result = loc if result is None else result & loc
        return result, maybe_droplevels(result, level, drop_level)
    level = self._get_level_number(level)
    # kludge for #1796
    if isinstance(key, list):
        key = tuple(key)
    # Case 2: tuple key starting at level 0 — may address several levels.
    if isinstance(key, tuple) and level == 0:
        try:
            # The whole tuple may itself be a label of level 0.
            if key in self.levels[0]:
                indexer = self._get_level_indexer(key, level=level)
                new_index = maybe_droplevels(indexer, [0], drop_level)
                return indexer, new_index
        except TypeError:
            pass
        if not any(isinstance(k, slice) for k in key):
            # partial selection
            # optionally get indexer to avoid re-calculation
            def partial_selection(key, indexer=None):
                if indexer is None:
                    indexer = self.get_loc(key)
                ilevels = [i for i in range(len(key))
                           if key[i] != slice(None, None)]
                return indexer, maybe_droplevels(indexer, ilevels,
                                                 drop_level)
            if len(key) == self.nlevels and self.is_unique:
                # Complete key in unique index -> standard get_loc
                return (self._engine.get_loc(key), None)
            else:
                return partial_selection(key)
        else:
            # Mixed tuple containing slices: resolve element by element and
            # intersect the per-level indexers.
            indexer = None
            for i, k in enumerate(key):
                if not isinstance(k, slice):
                    k = self._get_level_indexer(k, level=i)
                    if isinstance(k, slice):
                        # everything
                        if k.start == 0 and k.stop == len(self):
                            k = slice(None, None)
                    else:
                        k_index = k
                if isinstance(k, slice):
                    if k == slice(None, None):
                        continue
                    else:
                        raise TypeError(key)
                if indexer is None:
                    indexer = k_index
                else:  # pragma: no cover
                    indexer &= k_index
            if indexer is None:
                indexer = slice(None, None)
            ilevels = [i for i in range(len(key))
                       if key[i] != slice(None, None)]
            return indexer, maybe_droplevels(indexer, ilevels, drop_level)
    else:
        # Case 3: scalar key on a single level.
        indexer = self._get_level_indexer(key, level=level)
        return indexer, maybe_droplevels(indexer, [level], drop_level)
|
Get both the location for the requested label(s) and the
resulting sliced index.
Parameters
----------
key : label or sequence of labels
level : int/level name or list thereof, optional
drop_level : bool, default True
if ``False``, the resulting index will not drop any level.
Returns
-------
loc : A 2-tuple where the elements are:
Element 0: int, slice object or boolean array
Element 1: The resulting sliced multiindex/index. If the key
contains all levels, this will be ``None``.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')],
... names=['A', 'B'])
>>> mi.get_loc_level('b')
(slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B'))
>>> mi.get_loc_level('e', level='B')
(array([False, True, False], dtype=bool),
Index(['b'], dtype='object', name='A'))
>>> mi.get_loc_level(['b', 'e'])
(1, None)
See Also
---------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
|
def to_dict(self, omit=()):
    """
    Return a (shallow) copy of self cast to a dictionary,
    optionally omitting some key/value pairs.
    """
    copied = dict(self)
    for omitted_key in omit:
        # pop with a default: absent keys in `omit` are simply ignored.
        copied.pop(omitted_key, None)
    return copied
|
Return a (shallow) copy of self cast to a dictionary,
optionally omitting some key/value pairs.
|
def pickle_load(cls, filepath):
    """
    Loads the object from a pickle file.

    Args:
        filepath: Filename or directory name. If filepath is a directory,
            the directory tree below it is scanned and the first pickle
            database found is read. Raise RuntimeError if multiple
            databases are found.
    """
    if os.path.isdir(filepath):
        # Walk the tree below filepath looking for the pickle database.
        for dirpath, dirnames, filenames in os.walk(filepath):
            matches = [f for f in filenames if f == cls.PICKLE_FNAME]
            if not matches:
                continue
            if len(matches) > 1:
                raise RuntimeError("Found multiple databases:\n %s" % str(matches))
            filepath = os.path.join(dirpath, matches[0])
            break  # Exit os.walk
        else:
            raise ValueError(
                "Cannot find %s inside directory %s" % (cls.PICKLE_FNAME, filepath))
    with open(filepath, "rb") as fh:
        new = pickle.load(fh)
    # new.flows is a list of strings with the workdir of the flows
    # (see __getstate__). Re-read each Flow from its own pickle file so we
    # have an up-to-date version, and register it on the new object.
    from .flows import Flow
    flow_workdirs, new.flows = new.flows, []
    for workdir in flow_workdirs:
        new.add_flow(Flow.pickle_load(workdir))
    return new
|
Loads the object from a pickle file.
Args:
filepath: Filename or directory name. It filepath is a directory, we
scan the directory tree starting from filepath and we
read the first pickle database. Raise RuntimeError if multiple
databases are found.
|
def get_stream(self, session_id, stream_id):
    """Return a Stream object with information about an OpenTok stream:

    -id: The stream ID
    -videoType: "camera" or "screen"
    -name: The stream name (if one was set when the client published the stream)
    -layoutClassList: It's an array of the layout classes for the stream

    Raises GetStreamError, AuthError or RequestError depending on the
    HTTP status returned by the OpenTok API.
    """
    endpoint = self.endpoints.get_stream_url(session_id, stream_id)
    response = requests.get(
        endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout
    )
    status = response.status_code
    if status == 200:
        return Stream(response.json())
    if status == 400:
        raise GetStreamError('Invalid request. This response may indicate that data in your request data is invalid JSON. Or it may indicate that you do not pass in a session ID or you passed in an invalid stream ID.')
    if status == 403:
        raise AuthError('You passed in an invalid OpenTok API key or JWT token.')
    if status == 408:
        raise GetStreamError('You passed in an invalid stream ID.')
    raise RequestError('An unexpected error occurred', status)
|
Returns an Stream object that contains information of an OpenTok stream:
-id: The stream ID
-videoType: "camera" or "screen"
-name: The stream name (if one was set when the client published the stream)
-layoutClassList: It's an array of the layout classes for the stream
|
def get_service_inspect(self, stack, service):
    """Inspect a service.

    Retrieve the attributes of the service with the given name.

    Args:
        - stack: name of the stack (service group) the service belongs to
        - service: the service name

    Returns:
        A tuple of the form (<result>, <ResponseInfo>):
        - result: the service information on success, or
          {"error": "<errMsg string>"} on failure
        - ResponseInfo: the response information of the request
    """
    url = '{0}/v3/stacks/{1}/services/{2}/inspect'.format(self.host, stack, service)
    return self.__get(url)
|
查看服务
查看指定名称服务的属性。
Args:
- stack: 服务所属的服务组名称
- service: 服务名
Returns:
返回一个tuple对象,其格式为(<result>, <ResponseInfo>)
- result 成功返回服务信息,失败返回{"error": "<errMsg string>"}
- ResponseInfo 请求的Response信息
|
def should_we_load(kls):
    """Decide whether class *kls* should be loaded as a check."""
    name = kls.__name__
    # Abstract classes are never loaded.
    if name.endswith("AbstractCheck"):
        return False
    # Only classes following the *Check naming convention qualify.
    if not name.endswith("Check"):
        return False
    # The class must descend from AbstractCheck somewhere in its MRO.
    return any(base.__name__ == "AbstractCheck" for base in kls.__mro__)
|
should we load this class as a check?
|
def _set_show_zoning_enabled_configuration(self, v, load=False):
    """
    Setter method for show_zoning_enabled_configuration, mapped from YANG variable /brocade_zone_rpc/show_zoning_enabled_configuration (rpc)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_show_zoning_enabled_configuration is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_show_zoning_enabled_configuration() directly.
    YANG Description: This will display the Zoning Enabled-Configuration
    database.
    """
    # Unwrap values that carry their underlying YANG type wrapper.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce the value into the generated YANG class; raises on type mismatch.
        t = YANGDynClass(v,base=show_zoning_enabled_configuration.show_zoning_enabled_configuration, is_leaf=True, yang_name="show-zoning-enabled-configuration", rest_name="show-zoning-enabled-configuration", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Display the Zoning Enabled-Configuration', u'hidden': u'rpccmd', u'actionpoint': u'show_zoning_configuration_db'}}, namespace='urn:brocade.com:mgmt:brocade-zone', defining_module='brocade-zone', yang_type='rpc', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """show_zoning_enabled_configuration must be of a type compatible with rpc""",
            'defined-type': "rpc",
            'generated-type': """YANGDynClass(base=show_zoning_enabled_configuration.show_zoning_enabled_configuration, is_leaf=True, yang_name="show-zoning-enabled-configuration", rest_name="show-zoning-enabled-configuration", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Display the Zoning Enabled-Configuration', u'hidden': u'rpccmd', u'actionpoint': u'show_zoning_configuration_db'}}, namespace='urn:brocade.com:mgmt:brocade-zone', defining_module='brocade-zone', yang_type='rpc', is_config=True)""",
        })
    self.__show_zoning_enabled_configuration = t
    # Notify the backend (if present) that the value changed.
    if hasattr(self, '_set'):
        self._set()
|
Setter method for show_zoning_enabled_configuration, mapped from YANG variable /brocade_zone_rpc/show_zoning_enabled_configuration (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_show_zoning_enabled_configuration is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_show_zoning_enabled_configuration() directly.
YANG Description: This will display the Zoning Enabled-Configuration
database.
|
def score(self, X, y=None, sample_weight=None):
    """
    Apply transforms, and score with the final estimator.

    Parameters
    ----------
    X : iterable
        Data to predict on. Must fulfill input requirements of first step
        of the pipeline.
    y : iterable, default=None
        Targets used for scoring. Must fulfill label requirements for all
        steps of the pipeline.
    sample_weight : array-like, default=None
        If not None, this argument is passed as ``sample_weight`` keyword
        argument to the ``score`` method of the final estimator.

    Returns
    -------
    score : float
    """
    Xt, yt, swt = self._transform(X, y, sample_weight)
    # Record how many samples were scored.
    self.N_test = len(yt)
    score_params = {} if swt is None else {'sample_weight': swt}
    if self.scorer is None:
        return self._final_estimator.score(Xt, yt, **score_params)
    return self.scorer(self._final_estimator, Xt, yt, **score_params)
|
Apply transforms, and score with the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all
steps of the pipeline.
sample_weight : array-like, default=None
If not None, this argument is passed as ``sample_weight`` keyword
argument to the ``score`` method of the final estimator.
Returns
-------
score : float
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.