| code | docstring |
|---|---|
def needle(self, serie):
"""Draw a needle for each value"""
serie_node = self.svg.serie(serie)
for i, theta in enumerate(serie.values):
if theta is None:
continue
def point(x, y):
return '%f %f' % self.view((x, y))
val = self._format(serie, i)
metadata = serie.metadata.get(i)
gauges = decorate(
self.svg, self.svg.node(serie_node['plot'], class_="dots"),
metadata
)
tolerance = 1.15
if theta < self._min:
theta = self._min * tolerance
if theta > self._max:
theta = self._max * tolerance
w = (self._box._tmax - self._box._tmin + self.view.aperture) / 4
if self.logarithmic:
w = min(w, self._min - self._min * 10**-10)
alter(
self.svg.node(
gauges,
'path',
d='M %s L %s A %s 1 0 1 %s Z' % (
point(.85, theta),
point(self.needle_width, theta - w),
'%f %f' % (self.needle_width, self.needle_width),
point(self.needle_width, theta + w),
),
class_='line reactive tooltip-trigger'
), metadata
)
x, y = self.view((.75, theta))
self._tooltip_data(gauges, val, x, y, xlabel=self._get_x_label(i))
self._static_value(serie_node, val, x, y, metadata)
|
Draw a needle for each value
|
def _maybe_validate_shape_override(self, override_shape, base_is_scalar,
validate_args, name):
"""Helper to __init__ which ensures override batch/event_shape are valid."""
if override_shape is None:
override_shape = []
override_shape = tf.convert_to_tensor(
value=override_shape, dtype=tf.int32, name=name)
if not dtype_util.is_integer(override_shape.dtype):
raise TypeError("shape override must be an integer")
override_is_scalar = _is_scalar_from_shape_tensor(override_shape)
if tf.get_static_value(override_is_scalar):
return self._empty
dynamic_assertions = []
if tensorshape_util.rank(override_shape.shape) is not None:
if tensorshape_util.rank(override_shape.shape) != 1:
raise ValueError("shape override must be a vector")
elif validate_args:
dynamic_assertions += [
assert_util.assert_rank(
override_shape, 1, message="shape override must be a vector")
]
if tf.get_static_value(override_shape) is not None:
if any(s < 0 for s in tf.get_static_value(override_shape)):
raise ValueError("shape override must have non-negative elements")
elif validate_args:
dynamic_assertions += [
assert_util.assert_non_negative(
override_shape,
message="shape override must have non-negative elements")
]
is_both_nonscalar = prefer_static.logical_and(
prefer_static.logical_not(base_is_scalar),
prefer_static.logical_not(override_is_scalar))
if tf.get_static_value(is_both_nonscalar) is not None:
if tf.get_static_value(is_both_nonscalar):
raise ValueError("base distribution not scalar")
elif validate_args:
dynamic_assertions += [
assert_util.assert_equal(
is_both_nonscalar, False, message="base distribution not scalar")
]
if not dynamic_assertions:
return override_shape
return distribution_util.with_dependencies(
dynamic_assertions, override_shape)
|
Helper to __init__ which ensures override batch/event_shape are valid.
|
def login_exists(login, domain='', **kwargs):
'''
Find if a login exists in the MS SQL server.
domain, if provided, will be prepended to login
CLI Example:
.. code-block:: bash
salt minion mssql.login_exists 'LOGIN'
'''
if domain:
login = '{0}\\{1}'.format(domain, login)
try:
# We should get one, and only one row
return len(tsql_query(query="SELECT name FROM sys.syslogins WHERE name='{0}'".format(login), **kwargs)) == 1
except Exception as e:
return 'Could not find the login: {0}'.format(e)
|
Find if a login exists in the MS SQL server.
domain, if provided, will be prepended to login
CLI Example:
.. code-block:: bash
salt minion mssql.login_exists 'LOGIN'
|
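A minimal sketch of the domain-prefix behavior above; the domain and login names are placeholders, and connection kwargs are assumed to be handled by the module's `tsql_query`:

    # With a domain, the login is qualified as 'DOMAIN\login' before the query.
    login = '{0}\\{1}'.format('CORP', 'svc_user')
    print(login)  # -> CORP\svc_user
    # exists = login_exists('svc_user', domain='CORP')  # True if exactly one row matches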
def report(self, name, **kwargs):
"""Add Report data to Batch object.
Args:
name (str): The name for this Group.
file_name (str): The name for the attached file for this Group.
date_added (str, kwargs): The date timestamp the Indicator was created.
file_content (str;method, kwargs): The file contents or callback method to retrieve
file content.
publish_date (str, kwargs): The publish datetime expression for this Group.
xid (str, kwargs): The external id for this Group.
Returns:
obj: An instance of Report.
"""
group_obj = Report(name, **kwargs)
return self._group(group_obj)
|
Add Report data to Batch object.
Args:
name (str): The name for this Group.
file_name (str): The name for the attached file for this Group.
date_added (str, kwargs): The date timestamp the Indicator was created.
file_content (str;method, kwargs): The file contents or callback method to retrieve
file content.
publish_date (str, kwargs): The publish datetime expression for this Group.
xid (str, kwargs): The external id for this Group.
Returns:
obj: An instance of Report.
|
def collect(coro, index, results,
preserve_order=False,
return_exceptions=False):
"""
Collect is used internally to execute coroutines and collect the returned
value. This function is intended to be used internally.
"""
result = yield from safe_run(coro, return_exceptions=return_exceptions)
if preserve_order:
results[index] = result
else:
results.append(result)
|
Collect is used internally to execute coroutines and collect the returned
value. This function is intended to be used internally.
|
def getRootJobs(self):
"""
:return: The roots of the connected component of jobs that contains this job. \
A root is a job with no predecessors.
:rtype : set of toil.job.Job instances
"""
roots = set()
visited = set()
#Function to get the roots of a job
def getRoots(job):
if job not in visited:
visited.add(job)
if len(job._directPredecessors) > 0:
list(map(lambda p : getRoots(p), job._directPredecessors))
else:
roots.add(job)
#The following call ensures we explore all successor edges.
list(map(lambda c : getRoots(c), job._children +
job._followOns))
getRoots(self)
return roots
|
:return: The roots of the connected component of jobs that contains this job. \
A root is a job with no predecessors.
:rtype : set of toil.job.Job instances
|
def splitter(div, *args):
"""
Split text with dividers easily.
:return: newly made value
:rtype: str
:param div: the divider
"""
retstr = ""
if type(div) is int:
div = theArray()[div]
if len(args) == 1:
return args[0]
    for s in args[:-1]:
        retstr += s
        retstr += "\n"
        retstr += div
        retstr += "\n"
    if args:
        retstr += args[-1]
    return retstr
|
Split text with dividers easily.
:return: newly made value
:rtype: str
:param div: the divider
|
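A standalone sketch of the divider behavior, using an equivalent join-based formulation:

    def splitter_demo(div, *args):
        # place the divider on its own line between consecutive segments
        return ("\n%s\n" % div).join(args)

    print(splitter_demo("---", "alpha", "beta"))
    # alpha
    # ---
    # beta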
def actions(obj, **kwargs):
"""
Return actions available for an object
"""
if 'exclude' in kwargs:
kwargs['exclude'] = kwargs['exclude'].split(',')
actions = obj.get_actions(**kwargs)
if isinstance(actions, dict):
actions = actions.values()
buttons = "".join("%s" % action.render() for action in actions)
return '<div class="actions">%s</div>' % buttons
|
Return actions available for an object
|
def url_report(self, scan_url, apikey):
"""
    Send URLs to retrieve reports of past malicious associations
"""
url = self.base_url + "url/report"
params = {"apikey": apikey, 'resource': scan_url}
rate_limit_clear = self.rate_limit()
if rate_limit_clear:
response = requests.post(url, params=params, headers=self.headers)
if response.status_code == self.HTTP_OK:
json_response = response.json()
return json_response
elif response.status_code == self.HTTP_RATE_EXCEEDED:
time.sleep(20)
else:
self.logger.error("sent: %s, HTTP: %d", scan_url, response.status_code)
time.sleep(self.public_api_sleep_time)
|
Send URLs to retrieve reports of past malicious associations
|
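A hedged usage sketch against the VirusTotal v2 ``url/report`` endpoint; ``vt`` stands in for an instance of the surrounding class, and the URL and API key are placeholders:

    report = vt.url_report('http://example.com/', apikey='YOUR_API_KEY')
    if report and report.get('response_code') == 1:
        print('%d of %d engines flagged it' % (report['positives'], report['total']))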
def log_formatter(request=None):
"""
Log formatter used in our syslog
:param request: a request object
:returns: logging.Formatter
"""
if request:
format_str = ('%(asctime)s {ip} {name}: ENV={env} '
'REMOTE_IP=%(remote_ip)s REQUEST_ID=%(request_id)s '
'%(message)s')
else:
format_str = '%(asctime)s {ip} {name}: ENV={env} %(message)s'
try:
hostname = socket.gethostname()
except socket.gaierror:
hostname = 'localhost'
try:
ip = socket.gethostbyname(hostname)
except socket.gaierror:
ip = '127.0.0.1'
formatter = logging.Formatter(
format_str.format(ip=ip, name=options.name, env=options.env),
datefmt='%Y-%m-%dT%H:%M:%S')
logging.Formatter.converter = time.gmtime
return formatter
|
Log formatter used in our syslog
:param request: a request object
:returns: logging.Formatter
|
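A minimal standalone sketch of the same pattern, with hardcoded stand-ins for the module-level ``options`` object (the ``ip``, ``name``, and ``env`` values are hypothetical):

    import logging
    import time

    formatter = logging.Formatter(
        '%(asctime)s {ip} {name}: ENV={env} %(message)s'.format(
            ip='127.0.0.1', name='myapp', env='dev'),
        datefmt='%Y-%m-%dT%H:%M:%S')
    logging.Formatter.converter = time.gmtime  # timestamps in UTC
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    logging.getLogger('demo').addHandler(handler)
    logging.getLogger('demo').warning('hello')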
def get_forces(self, a=None):
"""Calculate atomic forces."""
if a is None:
a = self.a
forces = np.zeros([len(a), 3], dtype=float)
    if self.mask is not None:
        # apply the force only to the masked atoms
        forces[self.mask] = self.force
    else:
        forces[:] = self.force
return forces
|
Calculate atomic forces.
|
def read(self, source_path):
"""Parse content and metadata of creole files"""
self._metadata = {}
with pelican_open(source_path) as text:
content = creole2html(text, macros={'header': self._parse_header_macro,
'code': self._parse_code_macro})
return content, self._metadata
|
Parse content and metadata of creole files
|
def get_lookups(cls):
"""Fetch all Lookups"""
class_lookups = [parent.__dict__.get('class_lookups', {}) for parent in inspect.getmro(cls)]
return cls.merge_dicts(class_lookups)
|
Fetch all Lookups
|
def set(self, section, option, value):
"""Set an option value. Knows how to set options properly marked
as secure."""
if not value:
value = '!!False!!'
if self.is_secure_option(section, option):
self.set_secure(section, option, value)
else:
ConfigParser.set(self, section, option, value)
|
Set an option value. Knows how to set options properly marked
as secure.
|
def eklef(fname):
"""
Load an EK file, making it accessible to the EK readers.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/eklef_c.html
:param fname: Name of EK file to load.
:type fname: str
:return: File handle of loaded EK file.
:rtype: int
"""
fname = stypes.stringToCharP(fname)
handle = ctypes.c_int()
libspice.eklef_c(fname, ctypes.byref(handle))
return handle.value
|
Load an EK file, making it accessible to the EK readers.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/eklef_c.html
:param fname: Name of EK file to load.
:type fname: str
:return: File handle of loaded EK file.
:rtype: int
|
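A usage sketch, assuming the spiceypy wrappers (``eklef``/``ekuef`` mirror the corresponding CSPICE calls); the file name is a placeholder:

    import spiceypy

    handle = spiceypy.eklef('example.ek')   # load the EK and get a handle
    # ... issue EK queries against the loaded file ...
    spiceypy.ekuef(handle)                  # unload when done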
def add_property(self, prop):
"""Add a property to an object. The property is an instance of
a Property or one of its derived classes. Adding a property
disconnects it from the collection of properties common to all of the
objects of its class."""
if _debug: Object._debug("add_property %r", prop)
# make a copy of the properties dictionary
self._properties = _copy(self._properties)
# save the property reference and default value (usually None)
self._properties[prop.identifier] = prop
self._values[prop.identifier] = prop.default
|
Add a property to an object. The property is an instance of
a Property or one of its derived classes. Adding a property
disconnects it from the collection of properties common to all of the
objects of its class.
|
def fullversion():
'''
Shows installed version of dnsmasq and compile options.
CLI Example:
.. code-block:: bash
salt '*' dnsmasq.fullversion
'''
cmd = 'dnsmasq -v'
out = __salt__['cmd.run'](cmd).splitlines()
comps = out[0].split()
version_num = comps[2]
comps = out[1].split()
return {'version': version_num,
'compile options': comps[3:]}
|
Shows installed version of dnsmasq and compile options.
CLI Example:
.. code-block:: bash
salt '*' dnsmasq.fullversion
|
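A sketch of the parsing against typical ``dnsmasq -v`` output (the exact option list varies by build):

    out = ['Dnsmasq version 2.80  Copyright (c) 2000-2018 Simon Kelley',
           'Compile time options: IPv6 GNU-getopt DBus no-UBus no-i18n ...']
    print(out[0].split()[2])   # -> '2.80'
    print(out[1].split()[3:])  # -> ['IPv6', 'GNU-getopt', 'DBus', ...]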
def get_devicelist(home_hub_ip='192.168.1.254'):
"""Retrieve data from BT Home Hub 5 and return parsed result.
"""
url = 'http://{}/'.format(home_hub_ip)
try:
response = requests.get(url, timeout=5)
except requests.exceptions.Timeout:
_LOGGER.exception("Connection to the router timed out")
return
if response.status_code == 200:
return parse_devicelist(response.text)
else:
_LOGGER.error("Invalid response from Home Hub: %s", response)
|
Retrieve data from BT Home Hub 5 and return parsed result.
|
def display_google_book(id, page=None, width=700, height=500, **kwargs):
"""Display an embedded version of a Google book.
:param id: the id of the google book to display.
:type id: string
:param page: the start page for the book.
    :type page: string or int."""
if isinstance(page, int):
url = 'http://books.google.co.uk/books?id={id}&pg=PA{page}&output=embed'.format(id=id, page=page)
else:
url = 'http://books.google.co.uk/books?id={id}&pg={page}&output=embed'.format(id=id, page=page)
display_iframe_url(url, width=width, height=height, **kwargs)
|
Display an embedded version of a Google book.
:param id: the id of the google book to display.
:type id: string
:param page: the start page for the book.
:type page: string or int.
|
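A sketch of the URL construction: integer pages are mapped to the ``PA<n>`` page-anchor form, string pages pass through verbatim (the book id is a placeholder):

    id, page = 'uU6jAAAACAAJ', 3
    print('http://books.google.co.uk/books?id={id}&pg=PA{page}&output=embed'
          .format(id=id, page=page))
    # http://books.google.co.uk/books?id=uU6jAAAACAAJ&pg=PA3&output=embed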
def var(self, ddof=1, *args, **kwargs):
"""
Compute variance of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
nv.validate_groupby_func('var', args, kwargs)
if ddof == 1:
try:
return self._cython_agg_general('var', **kwargs)
except Exception:
f = lambda x: x.var(ddof=ddof, **kwargs)
with _group_selection_context(self):
return self._python_agg_general(f)
else:
f = lambda x: x.var(ddof=ddof, **kwargs)
with _group_selection_context(self):
return self._python_agg_general(f)
|
Compute variance of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : integer, default 1
degrees of freedom
|
def set_alternative(self, experiment_name, alternative):
"""Explicitly set the alternative the user is enrolled in for the specified experiment.
This allows you to change a user between alternatives. The user and goal counts for the new
    alternative will be incremented, but those for the old one will not be decremented. The user will
be enrolled in the experiment even if the experiment would not normally accept this user."""
experiment = experiment_manager.get_experiment(experiment_name)
if experiment:
self._set_enrollment(experiment, alternative)
|
Explicitly set the alternative the user is enrolled in for the specified experiment.
This allows you to change a user between alternatives. The user and goal counts for the new
alternative will be incremented, but those for the old one will not be decremented. The user will
be enrolled in the experiment even if the experiment would not normally accept this user.
|
def extend(self, builder):
"""
Extend the query builder with the needed functions.
:param builder: The query builder
:type builder: eloquent.orm.builder.Builder
"""
for extension in self._extensions:
getattr(self, '_add_%s' % extension)(builder)
builder.on_delete(self._on_delete)
|
Extend the query builder with the needed functions.
:param builder: The query builder
:type builder: eloquent.orm.builder.Builder
|
def attrlist(self):
    'Transform the KEY_MAP parameter into an attrlist for ldap filters'
keymap = self.config.get('KEY_MAP')
if keymap:
# https://github.com/ContinuumIO/flask-ldap-login/issues/11
# https://continuumsupport.zendesk.com/agent/tickets/393
return [s.encode('utf-8') for s in keymap.values()]
else:
return None
|
Transform the KEY_MAP parameter into an attrlist for ldap filters
|
def clean_slug(self):
"""
Save the old slug to be used later in PageAdmin.save_model()
to make the slug change propagate down the page tree, and clean
leading and trailing slashes which are added on elsewhere.
"""
self.instance._old_slug = self.instance.slug
new_slug = self.cleaned_data['slug']
if not isinstance(self.instance, Link) and new_slug != "/":
new_slug = clean_slashes(self.cleaned_data['slug'])
return new_slug
|
Save the old slug to be used later in PageAdmin.save_model()
to make the slug change propagate down the page tree, and clean
leading and trailing slashes which are added on elsewhere.
|
def print_file_results(file_result):
"""Print the results of validating a file.
Args:
file_result: A FileValidationResults instance.
"""
print_results_header(file_result.filepath, file_result.is_valid)
for object_result in file_result.object_results:
if object_result.warnings:
print_warning_results(object_result, 1)
if object_result.errors:
print_schema_results(object_result, 1)
if file_result.fatal:
print_fatal_results(file_result.fatal, 1)
|
Print the results of validating a file.
Args:
file_result: A FileValidationResults instance.
|
def parse_endpoint_name(self, endpoint):
'''split an endpoint name by colon, as the user can provide an
endpoint name separated from a path:
Parameters
==========
endpoint 12345:/path/on/remote
'''
parts = [x for x in endpoint.split(':') if x]
endpoint = parts[0]
if len(parts) == 1:
path = ''
else:
path = '/'.join(parts[1:])
return endpoint, path
|
split an endpoint name by colon, as the user can provide an
endpoint name separated from a path:
Parameters
==========
endpoint 12345:/path/on/remote
|
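A sketch of the colon-splitting rule on the docstring's example input:

    parts = [x for x in '12345:/path/on/remote'.split(':') if x]
    print(parts)  # -> ['12345', '/path/on/remote']
    # endpoint='12345', path='/path/on/remote'; a bare '12345' yields path=''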
def get(self):
"""
*get the PDF*
**Return:**
- ``pdfPath`` -- the path to the generated PDF
"""
self.log.debug('starting the ``get`` method')
# APPEND TO FILENAME?
if not self.append:
self.append = ""
if not self.readability:
pdfPath = self._print_original_webpage()
else:
pdfPath = self._print_parsed_webpage()
tag(
log=self.log,
filepath=pdfPath,
tags="pop",
rating=2,
wherefrom=self.url
)
self.log.debug('completed the ``get`` method')
return pdfPath
|
*get the PDF*
**Return:**
- ``pdfPath`` -- the path to the generated PDF
|
def get_crimes_no_location(self, force, date=None, category=None):
"""
Get crimes with no location for a force. Uses the crimes-no-location_
API call.
.. _crimes-no-location:
https://data.police.uk/docs/method/crimes-no-location/
:rtype: list
:param force: The force to get no-location crimes for.
:type force: str or Force
:param date: The month in which the crimes were reported in the format
``YYYY-MM`` (the latest date is used if ``None``).
:type date: str or None
:param category: The category of the crimes to filter by (either by ID
or CrimeCategory object)
:type category: str or CrimeCategory
:return: A ``list`` of :class:`crime.NoLocationCrime` objects which
were reported in the given month, by the specified force, but
which don't have a location.
"""
if not isinstance(force, Force):
force = Force(self, id=force)
if isinstance(category, CrimeCategory):
category = category.id
kwargs = {
'force': force.id,
'category': category or 'all-crime',
}
crimes = []
if date is not None:
kwargs['date'] = date
for c in self.service.request('GET', 'crimes-no-location', **kwargs):
crimes.append(NoLocationCrime(self, data=c))
return crimes
|
Get crimes with no location for a force. Uses the crimes-no-location_
API call.
.. _crimes-no-location:
https://data.police.uk/docs/method/crimes-no-location/
:rtype: list
:param force: The force to get no-location crimes for.
:type force: str or Force
:param date: The month in which the crimes were reported in the format
``YYYY-MM`` (the latest date is used if ``None``).
:type date: str or None
:param category: The category of the crimes to filter by (either by ID
or CrimeCategory object)
:type category: str or CrimeCategory
:return: A ``list`` of :class:`crime.NoLocationCrime` objects which
were reported in the given month, by the specified force, but
which don't have a location.
|
def deployment_label(self):
'''
this property returns the deployment label dictionary (mainly used by
stage description)
'''
label = dict()
label['swagger_info_object'] = self.info
label['api_name'] = self.rest_api_name
label['swagger_file'] = os.path.basename(self._swagger_file)
label['swagger_file_md5sum'] = self.md5_filehash
return label
|
this property returns the deployment label dictionary (mainly used by
stage description)
|
def transitDurationCircular(P, R_s, R_p, a, i):
r"""Estimation of the primary transit time. Assumes a circular orbit.
.. math::
T_\text{dur} = \frac{P}{\pi}\sin^{-1}
    \left[\frac{R_\star}{a}\frac{\sqrt{(1+k)^2 - b^2}}{\sin{i}} \right]
Where :math:`T_\text{dur}` transit duration, P orbital period,
:math:`R_\star` radius of the star, a is the semi-major axis,
k is :math:`\frac{R_p}{R_s}`, b is :math:`\frac{a}{R_*} \cos{i}`
(Seager & Mallen-Ornelas 2003)
"""
if i is nan:
i = 90 * aq.deg
i = i.rescale(aq.rad)
k = R_p / R_s # lit reference for eclipsing binaries
b = (a * cos(i)) / R_s
duration = (P / pi) * arcsin(((R_s * sqrt((1 + k) **
2 - b ** 2)) / (a * sin(i))).simplified)
return duration.rescale(aq.min)
|
r"""Estimation of the primary transit time. Assumes a circular orbit.
.. math::
T_\text{dur} = \frac{P}{\pi}\sin^{-1}
\left[\frac{R_\star}{a}\frac{\sqrt{(1+k)^2 - b^2}}{\sin{i}} \right]
Where :math:`T_\text{dur}` transit duration, P orbital period,
:math:`R_\star` radius of the star, a is the semi-major axis,
k is :math:`\frac{R_p}{R_s}`, b is :math:`\frac{a}{R_*} \cos{i}`
(Seager & Mallen-Ornelas 2003)
|
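A plain-float sketch of the same formula (angles in radians, consistent length units), without the ``quantities`` unit handling used above; the numbers are Earth-Sun-like placeholders:

    import math

    def transit_duration(P, R_s, R_p, a, i):
        k = R_p / R_s                # planet-to-star radius ratio
        b = (a * math.cos(i)) / R_s  # impact parameter
        return (P / math.pi) * math.asin(
            (R_s * math.sqrt((1 + k) ** 2 - b ** 2)) / (a * math.sin(i)))

    # P in days, lengths in metres, i = 90 degrees:
    print(transit_duration(365.25, 6.96e8, 6.37e6, 1.496e11, math.pi / 2))
    # ~0.55 days, i.e. roughly 13 hours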
def add_bookmark(self, new_bookmark, *, max_retries=3):
"""
Add a bookmark and check whether it was successfully added to the
    bookmark list. Already existing bookmarks are not added twice.
:param new_bookmark: the bookmark to add
:type new_bookmark: an instance of :class:`~bookmark_xso.Bookmark`
:param max_retries: the number of retries if setting the bookmark
fails
:type max_retries: :class:`int`
:raises RuntimeError: if the bookmark is not in the bookmark list
after `max_retries` retries.
    After setting the bookmark it is checked whether the bookmark
    is in the online storage; if it is not, the attempt is repeated at
    most `max_retries` times. A :class:`RuntimeError`
is raised if the bookmark could not be added successfully after
`max_retries`.
"""
with (yield from self._lock):
bookmarks = yield from self._get_bookmarks()
try:
modified_bookmarks = list(bookmarks)
if new_bookmark not in bookmarks:
modified_bookmarks.append(new_bookmark)
yield from self._set_bookmarks(modified_bookmarks)
retries = 0
bookmarks = yield from self._get_bookmarks()
while retries < max_retries:
if new_bookmark in bookmarks:
break
modified_bookmarks = list(bookmarks)
modified_bookmarks.append(new_bookmark)
yield from self._set_bookmarks(modified_bookmarks)
bookmarks = yield from self._get_bookmarks()
retries += 1
if new_bookmark not in bookmarks:
raise RuntimeError("Could not add bookmark")
finally:
self._diff_emit_update(bookmarks)
|
Add a bookmark and check whether it was successfully added to the
bookmark list. Already existing bookmarks are not added twice.
:param new_bookmark: the bookmark to add
:type new_bookmark: an instance of :class:`~bookmark_xso.Bookmark`
:param max_retries: the number of retries if setting the bookmark
fails
:type max_retries: :class:`int`
:raises RuntimeError: if the bookmark is not in the bookmark list
after `max_retries` retries.
After setting the bookmark it is checked whether the bookmark
is in the online storage; if it is not, the attempt is repeated at
most `max_retries` times. A :class:`RuntimeError`
is raised if the bookmark could not be added successfully after
`max_retries`.
|
def join(self, column_label, other, other_label=None):
"""Creates a new table with the columns of self and other, containing
rows for all values of a column that appear in both tables.
Args:
``column_label`` (``str``): label of column in self that is used to
join rows of ``other``.
``other``: Table object to join with self on matching values of
``column_label``.
Kwargs:
``other_label`` (``str``): default None, assumes ``column_label``.
Otherwise in ``other`` used to join rows.
Returns:
New table self joined with ``other`` by matching values in
``column_label`` and ``other_label``. If the resulting join is
empty, returns None.
>>> table = Table().with_columns('a', make_array(9, 3, 3, 1),
... 'b', make_array(1, 2, 2, 10),
... 'c', make_array(3, 4, 5, 6))
>>> table
a | b | c
9 | 1 | 3
3 | 2 | 4
3 | 2 | 5
1 | 10 | 6
>>> table2 = Table().with_columns( 'a', make_array(9, 1, 1, 1),
... 'd', make_array(1, 2, 2, 10),
... 'e', make_array(3, 4, 5, 6))
>>> table2
a | d | e
9 | 1 | 3
1 | 2 | 4
1 | 2 | 5
1 | 10 | 6
>>> table.join('a', table2)
a | b | c | d | e
1 | 10 | 6 | 2 | 4
1 | 10 | 6 | 2 | 5
1 | 10 | 6 | 10 | 6
9 | 1 | 3 | 1 | 3
>>> table.join('a', table2, 'a') # Equivalent to previous join
a | b | c | d | e
1 | 10 | 6 | 2 | 4
1 | 10 | 6 | 2 | 5
1 | 10 | 6 | 10 | 6
9 | 1 | 3 | 1 | 3
>>> table.join('a', table2, 'd') # Repeat column labels relabeled
a | b | c | a_2 | e
1 | 10 | 6 | 9 | 3
>>> table2 #table2 has three rows with a = 1
a | d | e
9 | 1 | 3
1 | 2 | 4
1 | 2 | 5
1 | 10 | 6
>>> table #table has only one row with a = 1
a | b | c
9 | 1 | 3
3 | 2 | 4
3 | 2 | 5
1 | 10 | 6
"""
if self.num_rows == 0 or other.num_rows == 0:
return None
if not other_label:
other_label = column_label
self_rows = self.index_by(column_label)
other_rows = other.index_by(other_label)
# Gather joined rows from self_rows that have join values in other_rows
joined_rows = []
for v, rows in self_rows.items():
if v in other_rows:
joined_rows += [row + o for row in rows for o in other_rows[v]]
if not joined_rows:
return None
# Build joined table
self_labels = list(self.labels)
other_labels = [self._unused_label(s) for s in other.labels]
other_labels_map = dict(zip(other.labels, other_labels))
joined = type(self)(self_labels + other_labels).with_rows(joined_rows)
# Copy formats from both tables
joined._formats.update(self._formats)
for label in other._formats:
joined._formats[other_labels_map[label]] = other._formats[label]
# Remove redundant column, but perhaps save its formatting
del joined[other_labels_map[other_label]]
if column_label not in self._formats and other_label in other._formats:
joined._formats[column_label] = other._formats[other_label]
return joined.move_to_start(column_label).sort(column_label)
|
Creates a new table with the columns of self and other, containing
rows for all values of a column that appear in both tables.
Args:
``column_label`` (``str``): label of column in self that is used to
join rows of ``other``.
``other``: Table object to join with self on matching values of
``column_label``.
Kwargs:
``other_label`` (``str``): default None, assumes ``column_label``.
Otherwise in ``other`` used to join rows.
Returns:
New table self joined with ``other`` by matching values in
``column_label`` and ``other_label``. If the resulting join is
empty, returns None.
>>> table = Table().with_columns('a', make_array(9, 3, 3, 1),
... 'b', make_array(1, 2, 2, 10),
... 'c', make_array(3, 4, 5, 6))
>>> table
a | b | c
9 | 1 | 3
3 | 2 | 4
3 | 2 | 5
1 | 10 | 6
>>> table2 = Table().with_columns( 'a', make_array(9, 1, 1, 1),
... 'd', make_array(1, 2, 2, 10),
... 'e', make_array(3, 4, 5, 6))
>>> table2
a | d | e
9 | 1 | 3
1 | 2 | 4
1 | 2 | 5
1 | 10 | 6
>>> table.join('a', table2)
a | b | c | d | e
1 | 10 | 6 | 2 | 4
1 | 10 | 6 | 2 | 5
1 | 10 | 6 | 10 | 6
9 | 1 | 3 | 1 | 3
>>> table.join('a', table2, 'a') # Equivalent to previous join
a | b | c | d | e
1 | 10 | 6 | 2 | 4
1 | 10 | 6 | 2 | 5
1 | 10 | 6 | 10 | 6
9 | 1 | 3 | 1 | 3
>>> table.join('a', table2, 'd') # Repeat column labels relabeled
a | b | c | a_2 | e
1 | 10 | 6 | 9 | 3
>>> table2 #table2 has three rows with a = 1
a | d | e
9 | 1 | 3
1 | 2 | 4
1 | 2 | 5
1 | 10 | 6
>>> table #table has only one row with a = 1
a | b | c
9 | 1 | 3
3 | 2 | 4
3 | 2 | 5
1 | 10 | 6
|
def udom83(text: str) -> str:
"""
    Udom83 - a Thai soundex rule.
:param str text: Thai word
:return: Udom83 soundex
"""
if not text or not isinstance(text, str):
return ""
text = _RE_1.sub("ัน\\1", text)
text = _RE_2.sub("ั\\1", text)
text = _RE_3.sub("ัน\\1", text)
text = _RE_4.sub("ัน", text)
text = _RE_5.sub("\\1", text)
text = _RE_6.sub("\\1ย", text)
text = _RE_7.sub("ม\\1", text)
text = _RE_8.sub("ม", text)
text = _RE_9.sub("ม", text)
text = _RE_10.sub("", text)
text = _RE_11.sub("", text)
if not text:
return ""
sd = text[0].translate(_TRANS1)
sd += text[1:].translate(_TRANS2)
return (sd + "000000")[:7]
|
Udom83 - a Thai soundex rule.
:param str text: Thai word
:return: Udom83 soundex
|
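Usage sketch; pythainlp exposes this as ``pythainlp.soundex.udom83``, and per the padding in the last line above, the result is always a 7-character code:

    from pythainlp.soundex import udom83

    code = udom83('วรรณ')
    print(len(code))  # -> 7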
def update(self):
"""Get info from dataset before opening dialog."""
self.filename = self.parent.info.dataset.filename
self.chan = self.parent.info.dataset.header['chan_name']
for chan in self.chan:
self.idx_chan.addItem(chan)
|
Get info from dataset before opening dialog.
|
def get_plugin_folders():
"""Get linkchecker plugin folders. Default is ~/.linkchecker/plugins/."""
folders = []
defaultfolder = normpath("~/.linkchecker/plugins")
if not os.path.exists(defaultfolder) and not Portable:
try:
make_userdir(defaultfolder)
except Exception as errmsg:
msg = _("could not create plugin directory %(dirname)r: %(errmsg)r")
args = dict(dirname=defaultfolder, errmsg=errmsg)
log.warn(LOG_CHECK, msg % args)
if os.path.exists(defaultfolder):
folders.append(defaultfolder)
return folders
|
Get linkchecker plugin folders. Default is ~/.linkchecker/plugins/.
|
def to_gds(self, multiplier):
"""
Convert this object to a GDSII element.
Parameters
----------
multiplier : number
A number that multiplies all dimensions written in the GDSII
element.
Returns
-------
out : string
The GDSII binary string that represents this object.
"""
name = self.ref_cell.name
if len(name) % 2 != 0:
name = name + '\0'
data = struct.pack('>4h', 4, 0x0A00, 4 + len(name),
0x1206) + name.encode('ascii')
if (self.rotation is not None) or (self.magnification is
not None) or self.x_reflection:
word = 0
values = b''
if self.x_reflection:
word += 0x8000
if not (self.magnification is None):
# This flag indicates that the magnification is absolute, not
# relative (not supported).
#word += 0x0004
values += struct.pack('>2h', 12, 0x1B05) + _eight_byte_real(
self.magnification)
if not (self.rotation is None):
# This flag indicates that the rotation is absolute, not
# relative (not supported).
#word += 0x0002
values += struct.pack('>2h', 12, 0x1C05) + _eight_byte_real(
self.rotation)
data += struct.pack('>2hH', 6, 0x1A01, word) + values
return data + struct.pack(
'>2h2l2h', 12, 0x1003, int(round(self.origin[0] * multiplier)),
int(round(self.origin[1] * multiplier)), 4, 0x1100)
|
Convert this object to a GDSII element.
Parameters
----------
multiplier : number
A number that multiplies all dimensions written in the GDSII
element.
Returns
-------
out : string
The GDSII binary string that represents this object.
|
def get_cgroup_container_metadata():
"""
Reads docker/kubernetes metadata (container id, pod id) from /proc/self/cgroup
The result is a nested dictionary with the detected IDs, e.g.
{
"container": {"id": "2227daf62df6694645fee5df53c1f91271546a9560e8600a525690ae252b7f63"},
"pod": {"uid": "90d81341_92de_11e7_8cf2_507b9d4141fa"}
}
:return: a dictionary with the detected ids or {}
"""
if not os.path.exists(CGROUP_PATH):
return {}
with open(CGROUP_PATH) as f:
return parse_cgroups(f) or {}
|
Reads docker/kubernetes metadata (container id, pod id) from /proc/self/cgroup
The result is a nested dictionary with the detected IDs, e.g.
{
"container": {"id": "2227daf62df6694645fee5df53c1f91271546a9560e8600a525690ae252b7f63"},
"pod": {"uid": "90d81341_92de_11e7_8cf2_507b9d4141fa"}
}
:return: a dictionary with the detected ids or {}
|
def main():
"""main
The entrypoint function. This function should also handle any runtime
    errors and exceptions in a clean fashion.
"""
try:
args = get_args()
if 'setup_cfg' in args and 'stdeb_cfg' in args:
_construct_cfgs_from_json(args)
else:
construct_cfgs(**args)
except Exception as Error:
print(str(Error))
print("Exiting cleanly...")
exit_cleanly()
exit_cleanly(error_number=0)
|
main
The entrypoint function. This function should also handle any runtime
errors and exceptions in a clean fashion.
|
def setup(options):
"""Initialize debug/logging in third party libraries correctly.
Args:
options (:class:`nyawc.Options`): The options to use for the current crawling runtime.
"""
if not options.misc.debug:
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecureRequestWarning
)
|
Initialize debug/logging in third party libraries correctly.
Args:
options (:class:`nyawc.Options`): The options to use for the current crawling runtime.
|
def parse_limit(limit_def):
"""Parse a structured flux limit definition as obtained from a YAML file
Returns a tuple of reaction, lower and upper bound.
"""
lower, upper = get_limits(limit_def)
reaction = limit_def.get('reaction')
return reaction, lower, upper
|
Parse a structured flux limit definition as obtained from a YAML file
Returns a tuple of reaction, lower and upper bound.
|
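A behavior sketch, assuming ``get_limits`` reads the ``lower``/``upper`` keys of the definition (the reaction id is a hypothetical example):

    limit_def = {'reaction': 'EX_glc__D_e', 'lower': -10, 'upper': 0}
    # parse_limit(limit_def) -> ('EX_glc__D_e', -10, 0)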
def replace_discount_coupon_by_id(cls, discount_coupon_id, discount_coupon, **kwargs):
"""Replace DiscountCoupon
Replace all attributes of DiscountCoupon
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_discount_coupon_by_id(discount_coupon_id, discount_coupon, async=True)
>>> result = thread.get()
:param async bool
:param str discount_coupon_id: ID of discountCoupon to replace (required)
:param DiscountCoupon discount_coupon: Attributes of discountCoupon to replace (required)
:return: DiscountCoupon
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._replace_discount_coupon_by_id_with_http_info(discount_coupon_id, discount_coupon, **kwargs)
else:
(data) = cls._replace_discount_coupon_by_id_with_http_info(discount_coupon_id, discount_coupon, **kwargs)
return data
|
Replace DiscountCoupon
Replace all attributes of DiscountCoupon
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_discount_coupon_by_id(discount_coupon_id, discount_coupon, async=True)
>>> result = thread.get()
:param async bool
:param str discount_coupon_id: ID of discountCoupon to replace (required)
:param DiscountCoupon discount_coupon: Attributes of discountCoupon to replace (required)
:return: DiscountCoupon
If the method is called asynchronously,
returns the request thread.
|
def _set_fields(self, fields):
"""Set or update the fields value."""
super(_BaseHNVModel, self)._set_fields(fields)
if not self.resource_ref:
endpoint = self._endpoint.format(
resource_id=self.resource_id, parent_id=self.parent_id,
grandparent_id=self.grandparent_id)
self.resource_ref = re.sub("(/networking/v[0-9]+)", "", endpoint)
|
Set or update the fields value.
|
def read_dir(input_dir, input_ext, func):
'''reads all files with extension input_ext
    in a directory input_dir and applies function func
to their contents'''
import os
for dirpath, dnames, fnames in os.walk(input_dir):
for fname in fnames:
if not dirpath.endswith(os.sep):
dirpath = dirpath + os.sep
if fname.endswith(input_ext):
func(read_file(dirpath + fname))
|
reads all files with extension input_ext
in a directory input_dir and applies function func
to their contents
|
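Usage sketch; ``read_file`` is assumed to return the file's contents as a string, and the directory path is a placeholder:

    def count_lines(contents):
        print(len(contents.splitlines()))

    read_dir('/tmp/docs', '.txt', count_lines)  # prints a line count per .txt file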
def invoked_with(self):
"""Similar to :attr:`Context.invoked_with` except properly handles
the case where :meth:`Context.send_help` is used.
If the help command was used regularly then this returns
the :attr:`Context.invoked_with` attribute. Otherwise, if
    the help command was called using :meth:`Context.send_help`
then it returns the internal command name of the help command.
Returns
---------
:class:`str`
The command name that triggered this invocation.
"""
command_name = self._command_impl.name
ctx = self.context
if ctx is None or ctx.command is None or ctx.command.qualified_name != command_name:
return command_name
return ctx.invoked_with
|
Similar to :attr:`Context.invoked_with` except properly handles
the case where :meth:`Context.send_help` is used.
If the help command was used regularly then this returns
the :attr:`Context.invoked_with` attribute. Otherwise, if
the help command was called using :meth:`Context.send_help`
then it returns the internal command name of the help command.
Returns
---------
:class:`str`
The command name that triggered this invocation.
|
def purge_network(network_id, purge_data, **kwargs):
"""
Remove a network from DB completely
Use purge_data to try to delete the data associated with only this network.
If no other resources link to this data, it will be deleted.
"""
user_id = kwargs.get('user_id')
try:
net_i = db.DBSession.query(Network).filter(Network.id == network_id).one()
except NoResultFound:
raise ResourceNotFoundError("Network %s not found"%(network_id))
log.info("Deleting network %s, id=%s", net_i.name, network_id)
net_i.check_write_permission(user_id)
db.DBSession.delete(net_i)
db.DBSession.flush()
return 'OK'
|
Remove a network from DB completely
Use purge_data to try to delete the data associated with only this network.
If no other resources link to this data, it will be deleted.
|
def virtualchain_set_opfields( op, **fields ):
"""
Pass along virtualchain-reserved fields to a virtualchain operation.
This layer of indirection is meant to help with future compatibility,
so virtualchain implementations do not try to set operation fields
directly.
"""
# warn about unsupported fields
for f in fields.keys():
if f not in indexer.RESERVED_KEYS:
log.warning("Unsupported virtualchain field '%s'" % f)
# propagate reserved fields
for f in fields.keys():
if f in indexer.RESERVED_KEYS:
op[f] = fields[f]
return op
|
Pass along virtualchain-reserved fields to a virtualchain operation.
This layer of indirection is meant to help with future compatibility,
so virtualchain implementations do not try to set operation fields
directly.
|
def one_step(self, current_state, previous_kernel_results):
"""Runs one iteration of the Elliptical Slice Sampler.
Args:
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s). The first `r` dimensions
index independent chains,
`r = tf.rank(log_likelihood_fn(*normal_sampler_fn()))`.
previous_kernel_results: `collections.namedtuple` containing `Tensor`s
representing values from previous calls to this function (or from the
`bootstrap_results` function.)
Returns:
next_state: Tensor or Python list of `Tensor`s representing the state(s)
of the Markov chain(s) after taking exactly one step. Has same type and
shape as `current_state`.
kernel_results: `collections.namedtuple` of internal calculations used to
advance the chain.
Raises:
TypeError: if `not log_likelihood.dtype.is_floating`.
"""
with tf.compat.v1.name_scope(
name=mcmc_util.make_name(self.name, 'elliptical_slice', 'one_step'),
values=[self._seed_stream,
current_state,
previous_kernel_results.log_likelihood]):
with tf.compat.v1.name_scope('initialize'):
[
init_state_parts,
init_log_likelihood
] = _prepare_args(
self.log_likelihood_fn,
current_state,
previous_kernel_results.log_likelihood)
normal_samples = self.normal_sampler_fn(self._seed_stream()) # pylint: disable=not-callable
normal_samples = list(normal_samples) if mcmc_util.is_list_like(
normal_samples) else [normal_samples]
u = tf.random.uniform(
shape=tf.shape(init_log_likelihood),
seed=self._seed_stream(),
dtype=init_log_likelihood.dtype.base_dtype,
)
threshold = init_log_likelihood + tf.math.log(u)
starting_angle = tf.random.uniform(
shape=tf.shape(init_log_likelihood),
minval=0.,
maxval=2 * np.pi,
name='angle',
seed=self._seed_stream(),
dtype=init_log_likelihood.dtype.base_dtype,
)
starting_angle_min = starting_angle - 2 * np.pi
starting_angle_max = starting_angle
starting_state_parts = _rotate_on_ellipse(
init_state_parts, normal_samples, starting_angle)
starting_log_likelihood = self.log_likelihood_fn(*starting_state_parts) # pylint: disable=not-callable
def chain_not_done(
angle,
angle_min,
angle_max,
current_state_parts,
current_log_likelihood):
del angle, angle_min, angle_max, current_state_parts
return tf.reduce_any(current_log_likelihood < threshold)
def sample_next_angle(
angle,
angle_min,
angle_max,
current_state_parts,
current_log_likelihood):
"""Slice sample a new angle, and rotate init_state by that amount."""
chain_not_done = current_log_likelihood < threshold
# Box in on angle. Only update angles for which we haven't generated a
# point that beats the threshold.
angle_min = tf.where(
tf.math.logical_and(angle < 0, chain_not_done),
angle,
angle_min)
angle_max = tf.where(
tf.math.logical_and(angle >= 0, chain_not_done),
angle,
angle_max)
new_angle = tf.random.uniform(
shape=tf.shape(current_log_likelihood),
minval=angle_min,
maxval=angle_max,
seed=self._seed_stream(),
dtype=angle.dtype.base_dtype
)
angle = tf.where(chain_not_done, new_angle, angle)
next_state_parts = _rotate_on_ellipse(
init_state_parts, normal_samples, angle)
new_state_parts = []
broadcasted_chain_not_done = _right_pad_with_ones(
chain_not_done, tf.rank(next_state_parts[0]))
for n_state, c_state in zip(next_state_parts, current_state_parts):
new_state_part = tf.where(
tf.broadcast_to(
broadcasted_chain_not_done,
tf.shape(n_state)),
n_state,
c_state)
new_state_parts.append(new_state_part)
return (
angle,
angle_min,
angle_max,
new_state_parts,
self.log_likelihood_fn(*new_state_parts) # pylint: disable=not-callable
)
[
next_angle,
_,
_,
next_state_parts,
next_log_likelihood,
] = tf.while_loop(
cond=chain_not_done,
body=sample_next_angle,
loop_vars=[
starting_angle,
starting_angle_min,
starting_angle_max,
starting_state_parts,
starting_log_likelihood
])
return [
next_state_parts if mcmc_util.is_list_like(
current_state) else next_state_parts[0],
EllipticalSliceSamplerKernelResults(
log_likelihood=next_log_likelihood,
angle=next_angle,
normal_samples=normal_samples,
),
]
|
Runs one iteration of the Elliptical Slice Sampler.
Args:
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s). The first `r` dimensions
index independent chains,
`r = tf.rank(log_likelihood_fn(*normal_sampler_fn()))`.
previous_kernel_results: `collections.namedtuple` containing `Tensor`s
representing values from previous calls to this function (or from the
`bootstrap_results` function.)
Returns:
next_state: Tensor or Python list of `Tensor`s representing the state(s)
of the Markov chain(s) after taking exactly one step. Has same type and
shape as `current_state`.
kernel_results: `collections.namedtuple` of internal calculations used to
advance the chain.
Raises:
TypeError: if `not log_likelihood.dtype.is_floating`.
|
async def fetch_wallet_search_next_records(wallet_handle: int,
wallet_search_handle: int,
count: int) -> str:
"""
Fetch next records for wallet search.
    :param wallet_handle: wallet handle (created by open_wallet).
    :param wallet_search_handle: wallet search handle (created by open_wallet_search)
:param count: Count of records to fetch
:return: wallet records json:
{
totalCount: <str>, // present only if retrieveTotalCount set to true
records: [{ // present only if retrieveRecords set to true
id: "Some id",
type: "Some type", // present only if retrieveType set to true
value: "Some value", // present only if retrieveValue set to true
tags: <tags json>, // present only if retrieveTags set to true
}],
}
"""
logger = logging.getLogger(__name__)
logger.debug("fetch_wallet_search_next_records: >>> wallet_handle: %r, wallet_search_handle: %r, count: %r",
wallet_handle,
wallet_search_handle,
count)
if not hasattr(fetch_wallet_search_next_records, "cb"):
logger.debug("fetch_wallet_search_next_records: Creating callback")
fetch_wallet_search_next_records.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_wallet_handle = c_int32(wallet_handle)
c_wallet_search_handle = c_int32(wallet_search_handle)
c_count = c_uint(count)
records_json = await do_call('indy_fetch_wallet_search_next_records',
c_wallet_handle,
c_wallet_search_handle,
c_count,
fetch_wallet_search_next_records.cb)
res = records_json.decode()
logger.debug("fetch_wallet_search_next_records: <<< res: %r", res)
return res
|
Fetch next records for wallet search.
:param wallet_handle: wallet handle (created by open_wallet).
:param wallet_search_handle: wallet search handle (created by open_wallet_search)
:param count: Count of records to fetch
:return: wallet records json:
{
totalCount: <str>, // present only if retrieveTotalCount set to true
records: [{ // present only if retrieveRecords set to true
id: "Some id",
type: "Some type", // present only if retrieveType set to true
value: "Some value", // present only if retrieveValue set to true
tags: <tags json>, // present only if retrieveTags set to true
}],
}
|
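A hedged usage sketch inside an async indy workflow; the handles are assumed to come from ``open_wallet``/``open_wallet_search``:

    import json

    async def first_page(wallet_handle, wallet_search_handle):
        records_json = await fetch_wallet_search_next_records(
            wallet_handle, wallet_search_handle, count=10)
        return json.loads(records_json).get('records', [])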
def _plot_transform_pairs(fCI, r, k, axes, tit):
r"""Plot the input transform pairs."""
# Plot lhs
plt.sca(axes[0])
plt.title('|' + tit + ' lhs|')
for f in fCI:
if f.name == 'j2':
lhs = f.lhs(k)
plt.loglog(k, np.abs(lhs[0]), lw=2, label='j0')
plt.loglog(k, np.abs(lhs[1]), lw=2, label='j1')
else:
plt.loglog(k, np.abs(f.lhs(k)), lw=2, label=f.name)
if tit != 'fC':
plt.xlabel('l')
plt.legend(loc='best')
# Plot rhs
plt.sca(axes[1])
plt.title('|' + tit + ' rhs|')
# Transform pair rhs
for f in fCI:
if tit == 'fC':
plt.loglog(r, np.abs(f.rhs), lw=2, label=f.name)
else:
plt.loglog(r, np.abs(f.rhs(r)), lw=2, label=f.name)
# Transform with Key
for f in fCI:
if f.name[1] in ['0', '1', '2']:
filt = j0j1filt()
else:
filt = sincosfilt()
kk = filt.base/r[:, None]
if f.name == 'j2':
lhs = f.lhs(kk)
kr0 = np.dot(lhs[0], getattr(filt, 'j0'))/r
kr1 = np.dot(lhs[1], getattr(filt, 'j1'))/r**2
kr = kr0+kr1
else:
kr = np.dot(f.lhs(kk), getattr(filt, f.name))/r
plt.loglog(r, np.abs(kr), '-.', lw=2, label=filt.name)
if tit != 'fC':
plt.xlabel('r')
plt.legend(loc='best')
|
r"""Plot the input transform pairs.
|
def folderitem(self, obj, item, index):
"""Applies new properties to the item (analysis) that is currently
being rendered as a row in the list.
:param obj: analysis to be rendered as a row in the list
:param item: dict representation of the analysis, suitable for the list
:param index: current position of the item within the list
:type obj: ATContentType/DexterityContentType
:type item: dict
:type index: int
:return: the dict representation of the item
:rtype: dict
"""
item = super(AnalysesView, self).folderitem(obj, item, index)
item_obj = api.get_object(obj)
uid = item["uid"]
# Slot is the row position where all analyses sharing the same parent
# (eg. AnalysisRequest, SampleReference), will be displayed as a group
slot = self.get_item_slot(uid)
item["Pos"] = slot
# The position string contains both the slot + the position of the
# analysis within the slot: "position_sortkey" will be used to sort all
# the analyses to be displayed in the list
str_position = self.uids_strpositions[uid]
item["pos_sortkey"] = str_position
item["colspan"] = {"Pos": 1}
item["Service"] = item_obj.Title()
item["Category"] = item_obj.getCategoryTitle()
item["DueDate"] = self.ulocalized_time(item_obj, long_format=0)
item["class"]["Service"] = "service_title"
# To prevent extra loops, we compute here the number of analyses to be
# rendered within each slot. This information will be useful later for
# applying rowspan to the first cell of each slot, that contains info
    # about the parent of all the analyses contained in that slot (e.g.
# Analysis Request ID, Sample Type, etc.)
rowspans = self.items_rowspans.get(slot, 0) + 1
remarks_enabled = self.is_analysis_remarks_enabled()
if remarks_enabled:
# Increase in one unit the rowspan, cause the comment field for
# this analysis will be rendered in a new row, below the row that
# displays the current item
rowspans = rowspans + 1
# We map this rowspan information in items_rowspan, that will be used
    # later during the rendering of slot headers (first cell of each row)
self.items_rowspans[slot] = rowspans
return item
|
Applies new properties to the item (analysis) that is currently
being rendered as a row in the list.
:param obj: analysis to be rendered as a row in the list
:param item: dict representation of the analysis, suitable for the list
:param index: current position of the item within the list
:type obj: ATContentType/DexterityContentType
:type item: dict
:type index: int
:return: the dict representation of the item
:rtype: dict
|
def _get_state(self):
"""
Returns the VM state (e.g. running, paused etc.)
:returns: state (string)
"""
result = yield from self._execute("showvminfo", [self._vmname, "--machinereadable"])
for info in result.splitlines():
if '=' in info:
name, value = info.split('=', 1)
if name == "VMState":
return value.strip('"')
return "unknown"
|
Returns the VM state (e.g. running, paused etc.)
:returns: state (string)
|
def get_ip_interface_output_interface_if_state(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_ip_interface = ET.Element("get_ip_interface")
config = get_ip_interface
output = ET.SubElement(get_ip_interface, "output")
interface = ET.SubElement(output, "interface")
interface_type_key = ET.SubElement(interface, "interface-type")
interface_type_key.text = kwargs.pop('interface_type')
interface_name_key = ET.SubElement(interface, "interface-name")
interface_name_key.text = kwargs.pop('interface_name')
if_state = ET.SubElement(interface, "if-state")
if_state.text = kwargs.pop('if_state')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
def errmsg(self, message, opts={}):
""" Convenience short-hand for self.intf[-1].errmsg """
if 'plain' != self.debugger.settings['highlight']:
message = colorize('standout', message)
pass
return(self.intf[-1].errmsg(message))
|
Convenience short-hand for self.intf[-1].errmsg
|
def start(self):
"""
Start the installation wizard
"""
self.log.debug('Starting the installation process')
self.browser.open(self.url)
self.system_check()
|
Start the installation wizard
|
def attach(cls, name, vhost, remote_name):
"""Attach an instance's vhost to a remote from the local repository."""
paas_access = cls.get('paas_access')
if not paas_access:
paas_info = cls.info(name)
paas_access = '%s@%s' \
% (paas_info['user'], paas_info['git_server'])
remote_url = 'ssh+git://%s/%s.git' % (paas_access, vhost)
ret = cls.execute('git remote add %s %s' % (remote_name, remote_url,))
if ret:
cls.echo('Added remote `%s` to your local git repository.'
% (remote_name))
cls.echo('Use `git push %s master` to push your code to the '
'instance.' % (remote_name))
cls.echo('Then `$ gandi deploy` to build and deploy your '
'application.')
|
Attach an instance's vhost to a remote from the local repository.
|
def handle_401(self, response, repo, **kwargs):
"""Fetch Bearer token and retry."""
if response.status_code != requests.codes.unauthorized:
return response
auth_info = response.headers.get('www-authenticate', '')
if 'bearer' not in auth_info.lower():
return response
self._token_cache[repo] = self._get_token(auth_info, repo)
# Consume content and release the original connection
# to allow our new request to reuse the same one.
# This pattern was inspired by the source code of requests.auth.HTTPDigestAuth
response.content
response.close()
retry_request = response.request.copy()
extract_cookies_to_jar(retry_request._cookies, response.request, response.raw)
retry_request.prepare_cookies(retry_request._cookies)
self._set_header(retry_request, repo)
retry_response = response.connection.send(retry_request, **kwargs)
retry_response.history.append(response)
retry_response.request = retry_request
return retry_response
|
Fetch Bearer token and retry.
|
def create_mixin(self):
"""
This will create the custom Model Mixin to attach to your custom field
enabled model.
:return:
"""
_builder = self
class CustomModelMixin(object):
@cached_property
def _content_type(self):
return ContentType.objects.get_for_model(self)
@classmethod
def get_model_custom_fields(cls):
""" Return a list of custom fields for this model, callable at model level """
return _builder.fields_model_class.objects.filter(content_type=ContentType.objects.get_for_model(cls))
def get_custom_fields(self):
""" Return a list of custom fields for this model """
return _builder.fields_model_class.objects.filter(content_type=self._content_type)
def get_custom_value(self, field):
""" Get a value for a specified custom field """
return _builder.values_model_class.objects.get(custom_field=field,
content_type=self._content_type,
object_id=self.pk)
def set_custom_value(self, field, value):
""" Set a value for a specified custom field """
custom_value, created = \
_builder.values_model_class.objects.get_or_create(custom_field=field,
content_type=self._content_type,
object_id=self.pk)
custom_value.value = value
custom_value.full_clean()
custom_value.save()
return custom_value
#def __getattr__(self, name):
# """ Get a value for a specified custom field """
# try:
# obj = _builder.values_model_class.objects.get(custom_field__name=name,
# content_type=self._content_type,
# object_id=self.pk)
# return obj.value
# except ObjectDoesNotExist:
# pass
# return super(CustomModelMixin, self).__getattr__(name)
return CustomModelMixin
|
This will create the custom Model Mixin to attach to your custom field
enabled model.
:return:
|
def create_model(self,
base_model_id,
forced_glossary=None,
parallel_corpus=None,
name=None,
**kwargs):
"""
Create model.
Uploads Translation Memory eXchange (TMX) files to customize a translation model.
You can either customize a model with a forced glossary or with a corpus that
contains parallel sentences. To create a model that is customized with a parallel
corpus <b>and</b> a forced glossary, proceed in two steps: customize with a
parallel corpus first and then customize the resulting model with a glossary.
Depending on the type of customization and the size of the uploaded corpora,
training can range from minutes for a glossary to several hours for a large
parallel corpus. You can upload a single forced glossary file and this file must
be less than <b>10 MB</b>. You can upload multiple parallel corpora tmx files. The
cumulative file size of all uploaded files is limited to <b>250 MB</b>. To
successfully train with a parallel corpus you must have at least <b>5,000 parallel
sentences</b> in your corpus.
    You can have a <b>maximum of 10 custom models per language pair</b>.
:param str base_model_id: The model ID of the model to use as the base for
customization. To see available models, use the `List models` method. Usually all
IBM provided models are customizable. In addition, all your models that have been
created via parallel corpus customization, can be further customized with a forced
glossary.
:param file forced_glossary: A TMX file with your customizations. The
    customizations in the file completely overwrite the domain translation data,
including high frequency or high confidence phrase translations. You can upload
only one glossary with a file size less than 10 MB per call. A forced glossary
should contain single words or short phrases.
:param file parallel_corpus: A TMX file with parallel sentences for source and
target language. You can upload multiple parallel_corpus files in one request. All
uploaded parallel_corpus files combined, your parallel corpus must contain at
least 5,000 parallel sentences to train successfully.
:param str name: An optional model name that you can use to identify the model.
Valid characters are letters, numbers, dashes, underscores, spaces and
apostrophes. The maximum length is 32 characters.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if base_model_id is None:
raise ValueError('base_model_id must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('language_translator', 'V3',
'create_model')
headers.update(sdk_headers)
params = {
'version': self.version,
'base_model_id': base_model_id,
'name': name
}
form_data = {}
if forced_glossary:
form_data['forced_glossary'] = (None, forced_glossary,
'application/octet-stream')
if parallel_corpus:
form_data['parallel_corpus'] = (None, parallel_corpus,
'application/octet-stream')
url = '/v3/models'
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
files=form_data,
accept_json=True)
return response
|
Create model.
Uploads Translation Memory eXchange (TMX) files to customize a translation model.
You can either customize a model with a forced glossary or with a corpus that
contains parallel sentences. To create a model that is customized with a parallel
corpus <b>and</b> a forced glossary, proceed in two steps: customize with a
parallel corpus first and then customize the resulting model with a glossary.
Depending on the type of customization and the size of the uploaded corpora,
training can range from minutes for a glossary to several hours for a large
parallel corpus. You can upload a single forced glossary file and this file must
be less than <b>10 MB</b>. You can upload multiple parallel corpora tmx files. The
cumulative file size of all uploaded files is limited to <b>250 MB</b>. To
successfully train with a parallel corpus you must have at least <b>5,000 parallel
sentences</b> in your corpus.
You can have a <b>maximum of 10 custom models per language pair</b>.
:param str base_model_id: The model ID of the model to use as the base for
customization. To see available models, use the `List models` method. Usually all
IBM-provided models are customizable. In addition, all your models that have been
created via parallel corpus customization can be further customized with a forced
glossary.
:param file forced_glossary: A TMX file with your customizations. The
customizations in the file completely overwrite the domain translation data,
including high frequency or high confidence phrase translations. You can upload
only one glossary with a file size less than 10 MB per call. A forced glossary
should contain single words or short phrases.
:param file parallel_corpus: A TMX file with parallel sentences for source and
target language. You can upload multiple parallel_corpus files in one request.
Across all uploaded parallel_corpus files combined, your parallel corpus must
contain at least 5,000 parallel sentences to train successfully.
:param str name: An optional model name that you can use to identify the model.
Valid characters are letters, numbers, dashes, underscores, spaces and
apostrophes. The maximum length is 32 characters.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
|
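For orientation, a minimal call sketch using the ibm-watson Python SDK; the base model ID, TMX file name, and the omitted authentication setup are illustrative assumptions, not part of the method above:

from ibm_watson import LanguageTranslatorV3

translator = LanguageTranslatorV3(version='2018-05-01')  # authenticator omitted for brevity
with open('glossary.tmx', 'rb') as glossary:             # hypothetical TMX file
    response = translator.create_model(
        base_model_id='en-fr',                           # any customizable base model
        forced_glossary=glossary,
        name='custom-en-fr')
print(response.get_result())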
def reply_inform(self, inform, orig_req):
"""Send an inform as part of the reply to an earlier request.
Parameters
----------
inform : Message object
The inform message to send.
orig_req : Message object
The request message being replied to. The inform message's
id is overridden with the id from orig_req before the
inform is sent.
"""
assert (inform.mtype == Message.INFORM)
assert (inform.name == orig_req.name)
inform.mid = orig_req.mid
return self._send_message(inform)
|
Send an inform as part of the reply to an earlier request.
Parameters
----------
inform : Message object
The inform message to send.
orig_req : Message object
The request message being replied to. The inform message's
id is overridden with the id from orig_req before the
inform is sent.
|
def getAxisNames(self):
"""
Collect a set of axis names from all deltas.
"""
s = {}
for l, x in self.items():
s.update(dict.fromkeys([k for k, v in l], None))
return set(s.keys())
|
Collect a set of axis names from all deltas.
|
def drag(self, NewPt):
# //Mouse drag, calculate rotation (Point2fT Quat4fT)
""" drag (Point2fT mouse_coord) -> new_quaternion_rotation_vec
"""
X = 0
Y = 1
Z = 2
W = 3
self.m_EnVec = self._mapToSphere(NewPt)
# //Compute the vector perpendicular to the begin and end vectors
# Perp = Vector3fT()
Perp = Vector3fCross(self.m_StVec, self.m_EnVec)
NewRot = Quat4fT()
# //Compute the length of the perpendicular vector
if Vector3fLength(Perp) > Epsilon: # //if it's non-zero
# //We're ok, so return the perpendicular vector as the transform after all
NewRot[X] = Perp[X]
NewRot[Y] = Perp[Y]
NewRot[Z] = Perp[Z]
# //In the quaternion values, w is cosine(theta / 2), where theta is rotation angle
NewRot[W] = Vector3fDot(self.m_StVec, self.m_EnVec)
else: # //if it's zero
# //The begin and end vectors coincide, so return a quaternion of zero matrix (no rotation)
NewRot[X] = NewRot[Y] = NewRot[Z] = NewRot[W] = 0.0
return NewRot
|
drag (Point2fT mouse_coord) -> new_quaternion_rotation_vec
|
def channel_interpolate(layer1, n_channel1, layer2, n_channel2):
"""Interpolate between layer1, n_channel1 and layer2, n_channel2.
Optimize for a convex combination of layer1, n_channel1 and
layer2, n_channel2, transitioning across the batch.
Args:
layer1: layer to optimize 100% at batch=0.
n_channel1: neuron index to optimize 100% at batch=0.
layer2: layer to optimize 100% at batch=N.
n_channel2: neuron index to optimize 100% at batch=N.
Returns:
Objective
"""
def inner(T):
batch_n = T(layer1).get_shape().as_list()[0]
arr1 = T(layer1)[..., n_channel1]
arr2 = T(layer2)[..., n_channel2]
weights = (np.arange(batch_n)/float(batch_n-1))
S = 0
for n in range(batch_n):
S += (1-weights[n]) * tf.reduce_mean(arr1[n])
S += weights[n] * tf.reduce_mean(arr2[n])
return S
return inner
|
Interpolate between layer1, n_channel1 and layer2, n_channel2.
Optimize for a convex combination of layer1, n_channel1 and
layer2, n_channel2, transitioning across the batch.
Args:
layer1: layer to optimize 100% at batch=0.
n_channel1: neuron index to optimize 100% at batch=0.
layer2: layer to optimize 100% at batch=N.
n_channel2: neuron index to optimize 100% at batch=N.
Returns:
Objective
|
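A hedged usage sketch with the lucid library this objective style comes from; the model and layer names are illustrative, and rendering needs a param_f whose batch dimension matches the number of interpolation steps:

import lucid.modelzoo.vision_models as models
from lucid.optvis import param, render

model = models.InceptionV1()
model.load_graphdef()
obj = channel_interpolate("mixed4a_pre_relu", 476, "mixed4b_pre_relu", 460)
batch_param_f = lambda: param.image(128, batch=5)  # 5 interpolation steps across the batch
images = render.render_vis(model, obj, param_f=batch_param_f)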
def unique(values):
"""
Hash table-based unique. Uniques are returned in order
of appearance. This does NOT sort.
Significantly faster than numpy.unique. Includes NA values.
Parameters
----------
values : 1d array-like
Returns
-------
numpy.ndarray or ExtensionArray
The return can be:
* Index : when the input is an Index
* Categorical : when the input is a Categorical dtype
* ndarray : when the input is a Series/ndarray
Return numpy.ndarray or ExtensionArray.
See Also
--------
Index.unique
Series.unique
Examples
--------
>>> pd.unique(pd.Series([2, 1, 3, 3]))
array([2, 1, 3])
>>> pd.unique(pd.Series([2] + [1] * 5))
array([2, 1])
>>> pd.unique(pd.Series([pd.Timestamp('20160101'),
... pd.Timestamp('20160101')]))
array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')
>>> pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'),
... pd.Timestamp('20160101', tz='US/Eastern')]))
array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')],
dtype=object)
>>> pd.unique(pd.Index([pd.Timestamp('20160101', tz='US/Eastern'),
... pd.Timestamp('20160101', tz='US/Eastern')]))
DatetimeIndex(['2016-01-01 00:00:00-05:00'],
... dtype='datetime64[ns, US/Eastern]', freq=None)
>>> pd.unique(list('baabc'))
array(['b', 'a', 'c'], dtype=object)
An unordered Categorical will return categories in the
order of appearance.
>>> pd.unique(pd.Series(pd.Categorical(list('baabc'))))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.unique(pd.Series(pd.Categorical(list('baabc'),
... categories=list('abc'))))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.unique(pd.Series(pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)))
[b, a, c]
Categories (3, object): [a < b < c]
An array of tuples
>>> pd.unique([('a', 'b'), ('b', 'a'), ('a', 'c'), ('b', 'a')])
array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object)
"""
values = _ensure_arraylike(values)
if is_extension_array_dtype(values):
# Dispatch to extension dtype's unique.
return values.unique()
original = values
htable, _, values, dtype, ndtype = _get_hashtable_algo(values)
table = htable(len(values))
uniques = table.unique(values)
uniques = _reconstruct_data(uniques, dtype, original)
return uniques
|
Hash table-based unique. Uniques are returned in order
of appearance. This does NOT sort.
Significantly faster than numpy.unique. Includes NA values.
Parameters
----------
values : 1d array-like
Returns
-------
numpy.ndarray or ExtensionArray
The return can be:
* Index : when the input is an Index
* Categorical : when the input is a Categorical dtype
* ndarray : when the input is a Series/ndarray
Return numpy.ndarray or ExtensionArray.
See Also
--------
Index.unique
Series.unique
Examples
--------
>>> pd.unique(pd.Series([2, 1, 3, 3]))
array([2, 1, 3])
>>> pd.unique(pd.Series([2] + [1] * 5))
array([2, 1])
>>> pd.unique(pd.Series([pd.Timestamp('20160101'),
... pd.Timestamp('20160101')]))
array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')
>>> pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'),
... pd.Timestamp('20160101', tz='US/Eastern')]))
array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')],
dtype=object)
>>> pd.unique(pd.Index([pd.Timestamp('20160101', tz='US/Eastern'),
... pd.Timestamp('20160101', tz='US/Eastern')]))
DatetimeIndex(['2016-01-01 00:00:00-05:00'],
... dtype='datetime64[ns, US/Eastern]', freq=None)
>>> pd.unique(list('baabc'))
array(['b', 'a', 'c'], dtype=object)
An unordered Categorical will return categories in the
order of appearance.
>>> pd.unique(pd.Series(pd.Categorical(list('baabc'))))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.unique(pd.Series(pd.Categorical(list('baabc'),
... categories=list('abc'))))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.unique(pd.Series(pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)))
[b, a, c]
Categories (3, object): [a < b < c]
An array of tuples
>>> pd.unique([('a', 'b'), ('b', 'a'), ('a', 'c'), ('b', 'a')])
array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object)
|
def set_home_position_send(self, target_system, latitude, longitude, altitude, x, y, z, q, approach_x, approach_y, approach_z, force_mavlink1=False):
'''
The position the system will return to and land on. The position is
set automatically by the system during the takeoff in
case it was not explicitly set by the operator before
or after. The global and local positions encode the
position in the respective coordinate frames, while
the q parameter encodes the orientation of the
surface. Under normal conditions it describes the
heading and terrain slope, which can be used by the
aircraft to adjust the approach. The approach 3D
vector describes the point to which the system should
fly in normal flight mode and then perform a landing
sequence along the vector.
target_system : System ID. (uint8_t)
latitude : Latitude (WGS84), in degrees * 1E7 (int32_t)
longitude : Longitude (WGS84), in degrees * 1E7 (int32_t)
altitude : Altitude (AMSL), in meters * 1000 (positive for up) (int32_t)
x : Local X position of this position in the local coordinate frame (float)
y : Local Y position of this position in the local coordinate frame (float)
z : Local Z position of this position in the local coordinate frame (float)
q : World to surface normal and heading transformation of the takeoff position. Used to indicate the heading and slope of the ground (float)
approach_x : Local X position of the end of the approach vector. Multicopters should set this position based on their takeoff path. Grass-landing fixed wing aircraft should set it the same way as multicopters. Runway-landing fixed wing aircraft should set it to the opposite direction of the takeoff, assuming the takeoff happened from the threshold / touchdown zone. (float)
approach_y : Local Y position of the end of the approach vector. Multicopters should set this position based on their takeoff path. Grass-landing fixed wing aircraft should set it the same way as multicopters. Runway-landing fixed wing aircraft should set it to the opposite direction of the takeoff, assuming the takeoff happened from the threshold / touchdown zone. (float)
approach_z : Local Z position of the end of the approach vector. Multicopters should set this position based on their takeoff path. Grass-landing fixed wing aircraft should set it the same way as multicopters. Runway-landing fixed wing aircraft should set it to the opposite direction of the takeoff, assuming the takeoff happened from the threshold / touchdown zone. (float)
'''
return self.send(self.set_home_position_encode(target_system, latitude, longitude, altitude, x, y, z, q, approach_x, approach_y, approach_z), force_mavlink1=force_mavlink1)
|
The position the system will return to and land on. The position is
set automatically by the system during the takeoff in
case it was not explicitly set by the operator before
or after. The global and local positions encode the
position in the respective coordinate frames, while
the q parameter encodes the orientation of the
surface. Under normal conditions it describes the
heading and terrain slope, which can be used by the
aircraft to adjust the approach. The approach 3D
vector describes the point to which the system should
fly in normal flight mode and then perform a landing
sequence along the vector.
target_system : System ID. (uint8_t)
latitude : Latitude (WGS84), in degrees * 1E7 (int32_t)
longitude : Longitude (WGS84), in degrees * 1E7 (int32_t)
altitude : Altitude (AMSL), in meters * 1000 (positive for up) (int32_t)
x : Local X position of this position in the local coordinate frame (float)
y : Local Y position of this position in the local coordinate frame (float)
z : Local Z position of this position in the local coordinate frame (float)
q : World to surface normal and heading transformation of the takeoff position. Used to indicate the heading and slope of the ground (float)
approach_x : Local X position of the end of the approach vector. Multicopters should set this position based on their takeoff path. Grass-landing fixed wing aircraft should set it the same way as multicopters. Runway-landing fixed wing aircraft should set it to the opposite direction of the takeoff, assuming the takeoff happened from the threshold / touchdown zone. (float)
approach_y : Local Y position of the end of the approach vector. Multicopters should set this position based on their takeoff path. Grass-landing fixed wing aircraft should set it the same way as multicopters. Runway-landing fixed wing aircraft should set it to the opposite direction of the takeoff, assuming the takeoff happened from the threshold / touchdown zone. (float)
approach_z : Local Z position of the end of the approach vector. Multicopters should set this position based on their takeoff path. Grass-landing fixed wing aircraft should set it the same way as multicopters. Runway-landing fixed wing aircraft should set it to the opposite direction of the takeoff, assuming the takeoff happened from the threshold / touchdown zone. (float)
|
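A hedged pymavlink sketch; the connection string and coordinates are placeholders, the positional field order follows the signature above, and units follow the field descriptions (degrees * 1E7, millimeters):

from pymavlink import mavutil

mav = mavutil.mavlink_connection('udpin:0.0.0.0:14550')  # hypothetical endpoint
mav.wait_heartbeat()
mav.mav.set_home_position_send(
    1,                          # target_system
    int(47.3977419 * 1e7),      # latitude, degrees * 1E7
    int(8.5455938 * 1e7),       # longitude, degrees * 1E7
    488 * 1000,                 # altitude AMSL, millimeters
    0.0, 0.0, 0.0,              # local x, y, z
    [1.0, 0.0, 0.0, 0.0],       # q: identity orientation
    0.0, 0.0, 1.0)              # approach vector end point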
def save_xml(self, doc, element):
'''Save this preceding condition into an xml.dom.Element object.'''
super(Preceding, self).save_xml(doc, element)
pre_element = doc.createElementNS(RTS_NS, RTS_NS_S + 'Preceding')
if self.timeout:
pre_element.setAttributeNS(RTS_NS, RTS_NS_S + 'timeout',
str(self.timeout))
if self.sending_timing:
pre_element.setAttributeNS(RTS_NS, RTS_NS_S + 'sendingTiming',
self.sending_timing)
for pc in self._preceding_components:
new_element = doc.createElementNS(RTS_NS,
RTS_NS_S + 'PrecedingComponents')
pc.save_xml(doc, new_element)
pre_element.appendChild(new_element)
element.appendChild(pre_element)
|
Save this preceding condition into an xml.dom.Element object.
|
def get_serializer(instance, plugin=None, model=None, *args, **kwargs):
"""
:param instance: model instance or queryset
:param plugin: plugin instance that is used to get serializer for
:param model: plugin model we build serializer for
:param kwargs: kwargs like many and other
:return:
"""
serializer_class = get_serializer_class(plugin=plugin, model=model)
if 'read_only' not in kwargs:
kwargs['read_only'] = True
return serializer_class(instance, *args, **kwargs)
|
:param instance: model instance or queryset
:param plugin: plugin instance that is used to get serializer for
:param model: plugin model we build serializer for
:param kwargs: kwargs like many and other
:return:
|
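A minimal sketch of how the helper might be called; MyPluginModel is a hypothetical plugin model, and read_only defaults to True per the kwargs handling above:

instance = MyPluginModel.objects.first()                    # hypothetical model
serializer = get_serializer(instance, model=MyPluginModel)  # read-only by default
many = get_serializer(MyPluginModel.objects.all(), model=MyPluginModel, many=True)
data = serializer.data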
def _FracInt(x,y,z,a,b,c,tau,n):
"""Returns
1 x^2 y^2 z^2
-------------------------- (1 - ------- - ------- - -------)^n
sqrt(tau+a)(tau+b)(tau+c)) tau+a tau+b tau+c
"""
denom = np.sqrt((a + tau)*(b + tau)*(c + tau))
return (1. - x**2/(a + tau) - y**2/(b + tau) - z**2/(c + tau))**n / denom
|
Returns
1 x^2 y^2 z^2
-------------------------- (1 - ------- - ------- - -------)^n
sqrt(tau+a)(tau+b)(tau+c)) tau+a tau+b tau+c
|
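The ASCII formula above, restated in LaTeX for readability (same symbols as the function arguments):

\frac{1}{\sqrt{(\tau+a)(\tau+b)(\tau+c)}}
\left(1 - \frac{x^{2}}{\tau+a} - \frac{y^{2}}{\tau+b} - \frac{z^{2}}{\tau+c}\right)^{n}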
def vlan_dot1q_tag_native(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
vlan = ET.SubElement(config, "vlan", xmlns="urn:brocade.com:mgmt:brocade-vlan")
dot1q = ET.SubElement(vlan, "dot1q")
tag = ET.SubElement(dot1q, "tag")
native = ET.SubElement(tag, "native")
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
def add_object(self, obj):
"""
Add object to local and app environment storage
:param obj: Instance of a AutoAPI object
"""
self.objects[obj.id] = obj
self.all_objects[obj.id] = obj
child_stack = list(obj.children)
while child_stack:
child = child_stack.pop()
self.all_objects[child.id] = child
child_stack.extend(getattr(child, "children", ()))
|
Add object to local and app environment storage
:param obj: Instance of an AutoAPI object
|
def tree(self, path, max_depth, full_path=False, include_stat=False):
"""DFS generator which starts from a given path and goes up to a max depth.
:param path: path from which the DFS will start
:param max_depth: max depth of DFS (0 means no limit)
:param full_path: should the full path of the child node be returned
:param include_stat: return the child Znode's stat along with the name & level
"""
for child_level_stat in self.do_tree(path, max_depth, 0, full_path, include_stat):
yield child_level_stat
|
DFS generator which starts from a given path and goes up to a max depth.
:param path: path from which the DFS will start
:param max_depth: max depth of DFS (0 means no limit)
:param full_path: should the full path of the child node be returned
:param include_stat: return the child Znode's stat along with the name & level
|
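A hedged usage sketch, assuming the underlying do_tree yields (name, level) pairs when include_stat is False and that client is an already-connected instance of the class above:

for name, level in client.tree('/services', max_depth=2, full_path=True):
    print('  ' * level + name)  # indented listing of the subtree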
def query(self, query):
'''Returns an iterable of objects matching criteria expressed in `query`.
LoggingDatastore logs the access.
'''
self.logger.info('%s: query %s' % (self, query))
return super(LoggingDatastore, self).query(query)
|
Returns an iterable of objects matching criteria expressed in `query`.
LoggingDatastore logs the access.
|
def order_by(self, field_name=None):
"""
Returns a new QuerySet instance with the ordering changed.
"""
assert self.query.can_filter(), "Cannot reorder a query once a slice has been taken."
clone = self._clone()
clone.query.clear_ordering()
if field_name is not None:
clone.query.add_ordering(field_name)
return clone
|
Returns a new QuerySet instance with the ordering changed.
|
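Usage follows the familiar QuerySet pattern; qs stands for any instance of the class defining the method above:

ordered = qs.order_by('created_at')  # order ascending by a field
cleared = qs.order_by()              # passing nothing clears the ordering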
def dispatch_hook(cls, s=None, *_args, **_kwds):
# type: (Optional[str], *Any, **Any) -> base_classes.Packet_metaclass
"""dispatch_hook returns the subclass of HPackHeaders that must be used
to dissect the string.
"""
if s is None:
return config.conf.raw_layer
fb = orb(s[0])
if fb & 0x80 != 0:
return HPackIndexedHdr
if fb & 0x40 != 0:
return HPackLitHdrFldWithIncrIndexing
if fb & 0x20 != 0:
return HPackDynamicSizeUpdate
return HPackLitHdrFldWithoutIndexing
|
dispatch_hook returns the subclass of HPackHeaders that must be used
to dissect the string.
|
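A worked illustration of the first-byte dispatch above, using representative prefix bytes from RFC 7541; the assertions mirror the mask checks in the same order:

# 1xxxxxxx -> HPackIndexedHdr (indexed header field)
assert 0x82 & 0x80
# 01xxxxxx -> HPackLitHdrFldWithIncrIndexing
assert (0x41 & 0x80) == 0 and (0x41 & 0x40)
# 001xxxxx -> HPackDynamicSizeUpdate
assert (0x3F & 0xC0) == 0 and (0x3F & 0x20)
# 000xxxxx -> HPackLitHdrFldWithoutIndexing
assert (0x10 & 0xE0) == 0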
def send_msg(self, msg):
"""
Sends Zebra message.
:param msg: Instance of py:class: `ryu.lib.packet.zebra.ZebraMessage`.
:return: Serialized msg if succeeded, otherwise None.
"""
if not self.is_active:
self.logger.debug(
'Cannot send message: Already deactivated: msg=%s', msg)
return
elif not self.send_q:
self.logger.debug(
'Cannot send message: Send queue does not exist: msg=%s', msg)
return
elif self.zserv_ver != msg.version:
self.logger.debug(
'Zebra protocol version mismatch:'
'server_version=%d, msg.version=%d',
self.zserv_ver, msg.version)
msg.version = self.zserv_ver # fixup
self.send_q.put(msg.serialize())
|
Sends Zebra message.
:param msg: Instance of py:class: `ryu.lib.packet.zebra.ZebraMessage`.
:return: Serialized msg if succeeded, otherwise None.
|
def calibration_total_count(self):
"""The number of stimuli presentations (including reps) for the current calibration selected
:returns: int -- number of presentations
"""
if self.selected_calibration_index == 2:
return self.tone_calibrator.count()
else:
return self.bs_calibrator.count()
|
The number of stimulus presentations (including reps) for the currently selected calibration
:returns: int -- number of presentations
|
def _get_binop_contexts(context, left, right):
"""Get contexts for binary operations.
This will return two inference contexts, the first one
for x.__op__(y), the other one for y.__rop__(x), where
only the arguments are reversed.
"""
# The order is important, since the first one should be
# left.__op__(right).
for arg in (right, left):
new_context = context.clone()
new_context.callcontext = contextmod.CallContext(args=[arg])
new_context.boundnode = None
yield new_context
|
Get contexts for binary operations.
This will return two inference contexts, the first one
for x.__op__(y), the other one for y.__rop__(x), where
only the arguments are reversed.
|
def barrier_layer_thickness(SA, CT):
"""
Compute the thickness of water separating the mixed surface layer from the
thermocline. A more precise definition would be the difference between
mixed layer depth (MLD) calculated from temperature minus the mixed layer
depth calculated using density.
"""
import gsw
sigma_theta = gsw.sigma0(SA, CT)
mask = mixed_layer_depth(CT)
mld = np.where(mask)[0][-1]
sig_surface = sigma_theta[0]
sig_bottom_mld = gsw.sigma0(SA[0], CT[mld])
d_sig_t = sig_surface - sig_bottom_mld
d_sig = sigma_theta - sig_bottom_mld
mask = d_sig < d_sig_t # Barrier layer.
return Series(mask, index=SA.index, name="BLT")
|
Compute the thickness of water separating the mixed surface layer from the
thermocline. A more precise definition would be the difference between
mixed layer depth (MLD) calculated from temperature minus the mixed layer
depth calculated using density.
|
def ltrim(self, name, start, end):
"""
Trim the list from start to end.
:param name: str the name of the redis key
:param start:
:param end:
:return: Future()
"""
with self.pipe as pipe:
return pipe.ltrim(self.redis_key(name), start, end)
|
Trim the list from start to end.
:param name: str the name of the redis key
:param start:
:param end:
:return: Future()
|
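A hedged sketch; wrapper is a hypothetical instance of the class above, and the call keeps only the 100 newest entries of a capped list:

future = wrapper.ltrim('recent_events', 0, 99)  # retain indices 0..99, drop the rest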
def predict_cumulative_hazard(self, X):
"""
Returns the cumulative hazard rates for the individuals
Parameters
----------
X: a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
"""
n, _ = X.shape
cols = _get_index(X)
if isinstance(X, pd.DataFrame):
order = self.cumulative_hazards_.columns
order = order.drop("_intercept") if self.fit_intercept else order
X_ = X[order].values
else:
X_ = X
X_ = X_ if not self.fit_intercept else np.c_[X_, np.ones((n, 1))]
timeline = self._index
individual_cumulative_hazards_ = pd.DataFrame(
np.dot(self.cumulative_hazards_, X_.T), index=timeline, columns=cols
)
return individual_cumulative_hazards_
|
Returns the cumulative hazard rates for the individuals
Parameters
----------
X: a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
|
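A hedged end-to-end sketch with lifelines' Aalen additive model, which exposes this method; the bundled regression dataset is used purely for illustration:

from lifelines import AalenAdditiveFitter
from lifelines.datasets import load_regression_dataset

df = load_regression_dataset()
aaf = AalenAdditiveFitter()
aaf.fit(df, duration_col='T', event_col='E')
cum_haz = aaf.predict_cumulative_hazard(df.drop(columns=['T', 'E']))
print(cum_haz.head())  # rows indexed by timeline, one column per individual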
def read(self, client):
"""
read current data of db row from plc
"""
assert(isinstance(self._bytearray, DB))
assert(self.row_size >= 0)
db_nr = self._bytearray.db_number
_bytearray = client.db_read(db_nr, self.db_offset, self.row_size)
data = self.get_bytearray()
# replace data in bytearray
for i, b in enumerate(_bytearray):
data[i + self.db_offset] = b
|
read current data of db row from plc
|
def find_package_indexes_in_dir(self, simple_dir):
"""Given a directory that contains simple packages indexes, return
a sorted list of normalized package names. This presumes every
directory within is a simple package index directory."""
packages = sorted(
{
# Filter out all of the "non" normalized names here
canonicalize_name(x)
for x in os.listdir(simple_dir)
}
)
# Package indexes must be in directories, so ignore anything else.
packages = [x for x in packages if os.path.isdir(os.path.join(simple_dir, x))]
return packages
|
Given a directory that contains simple package indexes, return
a sorted list of normalized package names. This presumes every
directory within is a simple package index directory.
|
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(RiemannHandler, self).get_default_config_help()
config.update({
'host': '',
'port': '',
'transport': 'tcp or udp',
})
return config
|
Returns the help text for the configuration options for this handler
|
def build(
self,
endpoint,
values=None,
method=None,
force_external=False,
append_unknown=True,
):
"""Building URLs works pretty much the other way round. Instead of
`match` you call `build` and pass it the endpoint and a dict of
arguments for the placeholders.
The `build` function also accepts an argument called `force_external`
which, if you set it to `True`, will force external URLs. By default,
external URLs (including the server name) will only be used if the
target URL is on a different subdomain.
>>> m = Map([
... Rule('/', endpoint='index'),
... Rule('/downloads/', endpoint='downloads/index'),
... Rule('/downloads/<int:id>', endpoint='downloads/show')
... ])
>>> urls = m.bind("example.com", "/")
>>> urls.build("index", {})
'/'
>>> urls.build("downloads/show", {'id': 42})
'/downloads/42'
>>> urls.build("downloads/show", {'id': 42}, force_external=True)
'http://example.com/downloads/42'
Because URLs cannot contain non-ASCII data you will always get
bytestrings back. Non-ASCII characters are urlencoded with the
charset defined on the map instance.
Additional values are converted to unicode and appended to the URL as
URL querystring parameters:
>>> urls.build("index", {'q': 'My Searchstring'})
'/?q=My+Searchstring'
When processing those additional values, lists are furthermore
interpreted as multiple values (as per
:py:class:`werkzeug.datastructures.MultiDict`):
>>> urls.build("index", {'q': ['a', 'b', 'c']})
'/?q=a&q=b&q=c'
Passing a ``MultiDict`` will also add multiple values:
>>> urls.build("index", MultiDict((('p', 'z'), ('q', 'a'), ('q', 'b'))))
'/?p=z&q=a&q=b'
If a rule does not exist when building, a `BuildError` exception is
raised.
The build method accepts an argument called `method` which allows you
to specify the method you want to have a URL built for if you have
different methods for the same endpoint specified.
.. versionadded:: 0.6
the `append_unknown` parameter was added.
:param endpoint: the endpoint of the URL to build.
:param values: the values for the URL to build. Unhandled values are
appended to the URL as query parameters.
:param method: the HTTP method for the rule if there are different
URLs for different methods on the same endpoint.
:param force_external: enforce full canonical external URLs. If the URL
scheme is not provided, this will generate
a protocol-relative URL.
:param append_unknown: unknown parameters are appended to the generated
URL as query string argument. Disable this
if you want the builder to ignore those.
"""
self.map.update()
if values:
if isinstance(values, MultiDict):
temp_values = {}
# iteritems(dict, values) is like `values.lists()`
# without the call or `list()` coercion overhead.
for key, value in iteritems(dict, values):
if not value:
continue
if len(value) == 1: # flatten single item lists
value = value[0]
if value is None: # drop None
continue
temp_values[key] = value
values = temp_values
else:
# drop None
values = dict(i for i in iteritems(values) if i[1] is not None)
else:
values = {}
rv = self._partial_build(endpoint, values, method, append_unknown)
if rv is None:
raise BuildError(endpoint, values, method, self)
domain_part, path = rv
host = self.get_host(domain_part)
# shortcut this.
if not force_external and (
(self.map.host_matching and host == self.server_name)
or (not self.map.host_matching and domain_part == self.subdomain)
):
return "%s/%s" % (self.script_name.rstrip("/"), path.lstrip("/"))
return str(
"%s//%s%s/%s"
% (
self.url_scheme + ":" if self.url_scheme else "",
host,
self.script_name[:-1],
path.lstrip("/"),
)
)
|
Building URLs works pretty much the other way round. Instead of
`match` you call `build` and pass it the endpoint and a dict of
arguments for the placeholders.
The `build` function also accepts an argument called `force_external`
which, if you set it to `True`, will force external URLs. By default,
external URLs (including the server name) will only be used if the
target URL is on a different subdomain.
>>> m = Map([
... Rule('/', endpoint='index'),
... Rule('/downloads/', endpoint='downloads/index'),
... Rule('/downloads/<int:id>', endpoint='downloads/show')
... ])
>>> urls = m.bind("example.com", "/")
>>> urls.build("index", {})
'/'
>>> urls.build("downloads/show", {'id': 42})
'/downloads/42'
>>> urls.build("downloads/show", {'id': 42}, force_external=True)
'http://example.com/downloads/42'
Because URLs cannot contain non-ASCII data you will always get
bytestrings back. Non-ASCII characters are urlencoded with the
charset defined on the map instance.
Additional values are converted to unicode and appended to the URL as
URL querystring parameters:
>>> urls.build("index", {'q': 'My Searchstring'})
'/?q=My+Searchstring'
When processing those additional values, lists are furthermore
interpreted as multiple values (as per
:py:class:`werkzeug.datastructures.MultiDict`):
>>> urls.build("index", {'q': ['a', 'b', 'c']})
'/?q=a&q=b&q=c'
Passing a ``MultiDict`` will also add multiple values:
>>> urls.build("index", MultiDict((('p', 'z'), ('q', 'a'), ('q', 'b'))))
'/?p=z&q=a&q=b'
If a rule does not exist when building, a `BuildError` exception is
raised.
The build method accepts an argument called `method` which allows you
to specify the method you want to have a URL built for if you have
different methods for the same endpoint specified.
.. versionadded:: 0.6
the `append_unknown` parameter was added.
:param endpoint: the endpoint of the URL to build.
:param values: the values for the URL to build. Unhandled values are
appended to the URL as query parameters.
:param method: the HTTP method for the rule if there are different
URLs for different methods on the same endpoint.
:param force_external: enforce full canonical external URLs. If the URL
scheme is not provided, this will generate
a protocol-relative URL.
:param append_unknown: unknown parameters are appended to the generated
URL as query string argument. Disable this
if you want the builder to ignore those.
|
def extract_files(self, resource):
"""
:param resource str|iterable files, a file or a directory
@return: iterable
"""
if hasattr(resource, "__iter__"):
files = [file for file in resource if self.can_be_extracted(file)]
elif os.path.isfile(resource):
files = [resource] if self.can_be_extracted(resource) else []
else:
files = self._extract_from_directory(resource)
return files
|
:param resource str|iterable files, a file or a directory
@return: iterable
|
def repmct(instr, marker, value, repcase, lenout=None):
"""
Replace a marker with the text representation of a
cardinal number.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/repmc_c.html
:param instr: Input string.
:type instr: str
:param marker: Marker to be replaced.
:type marker: str
:param value: Replacement value.
:type value: int
:param repcase: Case of replacement text.
:type repcase: str
:param lenout: Optional available space in output string
:type lenout: int
:return: Output string.
:rtype: str
"""
if lenout is None:
lenout = ctypes.c_int(len(instr) + len(marker) + 15)
instr = stypes.stringToCharP(instr)
marker = stypes.stringToCharP(marker)
value = ctypes.c_int(value)
repcase = ctypes.c_char(repcase.encode(encoding='UTF-8'))
out = stypes.stringToCharP(lenout)
libspice.repmct_c(instr, marker, value, repcase, lenout, out)
return stypes.toPythonString(out)
|
Replace a marker with the text representation of a
cardinal number.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/repmc_c.html
:param instr: Input string.
:type instr: str
:param marker: Marker to be replaced.
:type marker: str
:param value: Replacement value.
:type value: int
:param repcase: Case of replacement text.
:type repcase: str
:param lenout: Optional available space in output string
:type lenout: int
:return: Output string.
:rtype: str
|
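A hedged sketch following the CSPICE documentation example for repmct_c; the expected output shown in the comment is taken from those docs, and casing is controlled by repcase ('U' for all upper case):

import spiceypy

result = spiceypy.repmct('Invalid value. The value was: #', '#', 496, 'U')
# expected, per the CSPICE docs: 'Invalid value. The value was: FOUR HUNDRED NINETY-SIX'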
def get_date(self, p_tag):
""" Given a date tag, return a date object. """
string = self.tag_value(p_tag)
result = None
try:
result = date_string_to_date(string) if string else None
except ValueError:
pass
return result
|
Given a date tag, return a date object.
|
def convert(self, newstart: str) -> None:
"""Convert to another list type by replacing starting pattern."""
match = self._match
ms = match.start()
for s, e in reversed(match.spans('pattern')):
self[s - ms:e - ms] = newstart
self.pattern = escape(newstart)
|
Convert to another list type by replacing starting pattern.
|
def create_ascii_table(observation_table, outfile):
"""Given a table of observations create an ascii log file for easy parsing.
Store the result in outfile (could/should be a vospace dataNode)
observation_table: astropy.votable.array object
outfile: str (name of the vospace dataNode to store the result to)
"""
logging.info("writing text log to %s" % outfile)
stamp = "#\n# Last Updated: " + time.asctime() + "\n#\n"
header = "| %20s | %20s | %20s | %20s | %20s | %20s | %20s |\n" % (
"EXPNUM", "OBS-DATE", "FIELD", "EXPTIME(s)", "RA", "DEC", "RUNID")
bar = "=" * (len(header) - 1) + "\n"
if outfile[0:4] == "vos:":
temp_file = tempfile.NamedTemporaryFile(suffix='.txt')
fout = temp_file
else:
fout = open(outfile, 'w')
t2 = None
fout.write(bar + stamp + bar + header)
populated = storage.list_dbimages()
for i in range(len(observation_table) - 1, -1, -1):
row = observation_table.data[i]
if row['dataset_name'] not in populated:
storage.populate(row['dataset_name'])
str_date = str(ephem.date(row.StartDate +
2400000.5 -
ephem.julian_date(ephem.date(0))))[:20]
t1 = time.strptime(str_date, "%Y/%m/%d %H:%M:%S")
if t2 is None or math.fabs(time.mktime(t2) - time.mktime(t1)) > 3 * 3600.0:
fout.write(bar)
t2 = t1
ra = str(ephem.hours(math.radians(row.RA)))
dec = str(ephem.degrees(math.radians(row.DEC)))
line = "| %20s | %20s | %20s | %20.1f | %20s | %20s | %20s |\n" % (
str(row.dataset_name),
str(ephem.date(row.StartDate + 2400000.5 -
ephem.julian_date(ephem.date(0))))[:20],
row.TargetName[:20],
row.ExposureTime, ra[:20], dec[:20], row.ProposalID[:20])
fout.write(line)
fout.write(bar)
if outfile[0:4] == "vos:":
fout.flush()
storage.copy(fout.name, outfile)
fout.close()
return
|
Given a table of observations, create an ASCII log file for easy parsing.
Store the result in outfile (could/should be a vospace dataNode)
observation_table: astropy.votable.array object
outfile: str (name of the vospace dataNode to store the result to)
|
def target_slide(self):
"""
A reference to the slide in this presentation that is the target of
the slide jump action in this shape. Slide jump actions include
`PP_ACTION.FIRST_SLIDE`, `LAST_SLIDE`, `NEXT_SLIDE`,
`PREVIOUS_SLIDE`, and `NAMED_SLIDE`. Returns |None| for all other
actions. In particular, the `LAST_SLIDE_VIEWED` action and the `PLAY`
(start other presentation) actions are not supported.
A slide object may be assigned to this property, which makes the
shape an "internal hyperlink" to the assigened slide::
slide, target_slide = prs.slides[0], prs.slides[1]
shape = slide.shapes[0]
shape.target_slide = target_slide
Assigning |None| removes any slide jump action. Note that this is
accomplished by removing any action present (such as a hyperlink),
without first checking that it is a slide jump action.
"""
slide_jump_actions = (
PP_ACTION.FIRST_SLIDE,
PP_ACTION.LAST_SLIDE,
PP_ACTION.NEXT_SLIDE,
PP_ACTION.PREVIOUS_SLIDE,
PP_ACTION.NAMED_SLIDE,
)
if self.action not in slide_jump_actions:
return None
if self.action == PP_ACTION.FIRST_SLIDE:
return self._slides[0]
elif self.action == PP_ACTION.LAST_SLIDE:
return self._slides[-1]
elif self.action == PP_ACTION.NEXT_SLIDE:
next_slide_idx = self._slide_index + 1
if next_slide_idx >= len(self._slides):
raise ValueError('no next slide')
return self._slides[next_slide_idx]
elif self.action == PP_ACTION.PREVIOUS_SLIDE:
prev_slide_idx = self._slide_index - 1
if prev_slide_idx < 0:
raise ValueError('no previous slide')
return self._slides[prev_slide_idx]
elif self.action == PP_ACTION.NAMED_SLIDE:
rId = self._hlink.rId
return self.part.related_parts[rId].slide
|
A reference to the slide in this presentation that is the target of
the slide jump action in this shape. Slide jump actions include
`PP_ACTION.FIRST_SLIDE`, `LAST_SLIDE`, `NEXT_SLIDE`,
`PREVIOUS_SLIDE`, and `NAMED_SLIDE`. Returns |None| for all other
actions. In particular, the `LAST_SLIDE_VIEWED` action and the `PLAY`
(start other presentation) actions are not supported.
A slide object may be assigned to this property, which makes the
shape an "internal hyperlink" to the assigened slide::
slide, target_slide = prs.slides[0], prs.slides[1]
shape = slide.shapes[0]
shape.target_slide = target_slide
Assigning |None| removes any slide jump action. Note that this is
accomplished by removing any action present (such as a hyperlink),
without first checking that it is a slide jump action.
|
def num_listeners(self, event=None):
"""Return the number of listeners for ``event``.
Return the total number of listeners for all events on this object if
``event`` is :class:`None`.
"""
if event is not None:
return len(self._listeners[event])
else:
return sum(len(l) for l in self._listeners.values())
|
Return the number of listeners for ``event``.
Return the total number of listeners for all events on this object if
``event`` is :class:`None`.
|
def format_year(year):
"""
Format the year value of the ``YearArchiveView``,
which can be a integer or date object.
This tag is no longer needed, but exists for template compatibility.
It was a compatibility tag for Django 1.4.
"""
if isinstance(year, (date, datetime)):
# Django 1.5 and up, 'year' is a date object, consistent with month+day views.
return unicode(year.year)
else:
# Django 1.4 just passes the kwarg as string.
return unicode(year)
|
Format the year value of the ``YearArchiveView``,
which can be an integer or a date object.
This tag is no longer needed, but exists for template compatibility.
It was a compatibility tag for Django 1.4.
|
def store_field(self, state, field_name, field_type, value):
"""
Store a field of a given object, without resolving the hierarchy
:param state: angr state where we want to allocate the object attribute
:type SimState
:param field_name: name of the attribute
:type str
:param value: attribute's value
:type SimSootValue
"""
field_ref = SimSootValue_InstanceFieldRef(self.heap_alloc_id, self.type, field_name, field_type)
state.memory.store(field_ref, value)
|
Store a field of a given object, without resolving the hierarchy
:param state: angr state where we want to allocate the object attribute
:type SimState
:param field_name: name of the attribute
:type str
:param value: attribute's value
:type SimSootValue
|
def _load_stats(self):
""" Load the webpack-stats file """
for attempt in range(0, 3):
try:
with self.stats_file.open() as f:
return json.load(f)
except ValueError:
# If we failed to parse the JSON, it's possible that the
# webpack process is writing to it concurrently and it's in a
# bad state. Sleep and retry.
if attempt < 2:
time.sleep(attempt * 0.2)
else:
raise
except IOError:
raise IOError(
"Could not read stats file {0}. Make sure you are using the "
"webpack-bundle-tracker plugin" .format(self.stats_file))
|
Load the webpack-stats file
|
def event_choices(events):
""" Get the possible events from settings """
if events is None:
msg = "Please add some events in settings.WEBHOOK_EVENTS."
raise ImproperlyConfigured(msg)
try:
choices = [(x, x) for x in events]
except TypeError:
""" Not a valid iterator, so we raise an exception """
msg = "settings.WEBHOOK_EVENTS must be an iterable object."
raise ImproperlyConfigured(msg)
return choices
|
Get the possible events from settings
|
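A small worked example of the expected settings shape and the resulting choices list:

WEBHOOK_EVENTS = ('user.created', 'user.deleted')  # iterable of event names
choices = event_choices(WEBHOOK_EVENTS)
# -> [('user.created', 'user.created'), ('user.deleted', 'user.deleted')]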
def replace(self, old, new):
"""Replace an instruction"""
if old.type != new.type:
raise TypeError("new instruction has a different type")
pos = self.instructions.index(old)
self.instructions.remove(old)
self.instructions.insert(pos, new)
for bb in self.parent.basic_blocks:
for instr in bb.instructions:
instr.replace_usage(old, new)
|
Replace an instruction
|
def pep440_dev_version(self, verbose=False, non_local=False):
""" Return a PEP-440 dev version appendix to the main version number.
Result is ``None`` if the workdir is in a release-ready state
(i.e. clean and properly tagged).
"""
version = capture("python setup.py --version", echo=verbose)
if verbose:
notify.info("setuptools version = '{}'".format(version))
now = '{:%Y%m%d!%H%M}'.format(datetime.now())
tag = capture("git describe --long --tags --dirty='!{}'".format(now), echo=verbose)
if verbose:
notify.info("git describe = '{}'".format(tag))
try:
tag, date, time = tag.split('!')
except ValueError:
date = time = ''
tag, commits, short_hash = tag.rsplit('-', 2) # maxsplit=2 keeps dashes inside the tag itself intact
label = tag
if re.match(r"v[0-9]+(\.[0-9]+)*", label):
label = label[1:]
# Make a PEP-440 version appendix, the format is:
# [N!]N(.N)*[{a|b|rc}N][.postN][.devN][+<local version label>]
if commits == '0' and label == version:
pep440 = None
else:
local_part = [
re.sub(r"[^a-zA-Z0-9]+", '.', label).strip('.'), # reduce to alphanum and dots
short_hash,
date + ('T' + time if time else ''),
]
build_number = os.environ.get('BUILD_NUMBER', 'n/a')
if build_number.isdigit():
local_part.extend(['ci', build_number])
if verbose:
notify.info("Adding CI build ID #{} to version".format(build_number))
local_part = [i for i in local_part if i]
pep440 = '.dev{}+{}'.format(commits, '.'.join(local_part).strip('.'))
if non_local and pep440: # guard: pep440 is None in a release-ready state
pep440, _ = pep440.split('+', 1)
return pep440
|
Return a PEP-440 dev version appendix to the main version number.
Result is ``None`` if the workdir is in a release-ready state
(i.e. clean and properly tagged).
|
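A worked illustration of the appendix format the method assembles; all field values here are hypothetical:

commits, label, short_hash, stamp = '4', '1.2.3', 'gabc1234', '20240101T1200'
pep440 = '.dev{}+{}'.format(commits, '.'.join([label, short_hash, stamp]))
# -> '.dev4+1.2.3.gabc1234.20240101T1200'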
def iteration(self, node_status=True):
"""
Execute a single model iteration
:return: Iteration_id, Incremental node status (dictionary node->status)
"""
self.clean_initial_status(self.available_statuses.values())
actual_status = {node: nstatus for node, nstatus in future.utils.iteritems(self.status)}
# streaming
if self.stream_execution:
u, v = list(self.graph.edges())[0]
u_status = self.status[u]
v_status = self.status[v]
if u_status == 1 and v_status == 0:
p = np.random.random_sample()
if p < self.params['model']['beta']:
actual_status[v] = 1
if v_status == 1 and u_status == 0:
p = np.random.random_sample()
if p < self.params['model']['beta']:
actual_status[u] = 1
delta, node_count, status_delta = self.status_delta(actual_status)
self.status = actual_status
self.actual_iteration += 1
if node_status:
return {"iteration": self.actual_iteration - 1, "status": delta.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": self.actual_iteration - 1, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
# snapshot
else:
if self.actual_iteration == 0:
self.actual_iteration += 1
delta, node_count, status_delta = self.status_delta(actual_status)
if node_status:
return {"iteration": 0, "status": actual_status.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": 0, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
for u in self.graph.nodes():
u_status = self.status[u]
eventp = np.random.random_sample()
neighbors = self.graph.neighbors(u)
if isinstance(self.graph, nx.DiGraph):
neighbors = self.graph.predecessors(u)
if u_status == 0:
infected_neighbors = len([v for v in neighbors if self.status[v] == 1])
if eventp < self.params['model']['beta'] * infected_neighbors:
actual_status[u] = 1
delta, node_count, status_delta = self.status_delta(actual_status)
self.status = actual_status
self.actual_iteration += 1
if node_status:
return {"iteration": self.actual_iteration - 1, "status": delta.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": self.actual_iteration - 1, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
|
Execute a single model iteration
:return: Iteration_id, Incremental node status (dictionary node->status)
|
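A hedged sketch of the documented ndlib SI-model workflow that drives this method; module paths may vary across ndlib versions:

import networkx as nx
import ndlib.models.ModelConfig as mc
import ndlib.models.epidemics as ep

g = nx.erdos_renyi_graph(1000, 0.01)
model = ep.SIModel(g)
config = mc.Configuration()
config.add_model_parameter('beta', 0.01)               # infection probability
config.add_model_parameter('fraction_infected', 0.05)  # initial seed share
model.set_initial_status(config)
iterations = [model.iteration() for _ in range(10)]
print(iterations[-1]['node_count'])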
def steady_connection(self):
"""Get a steady, non-persistent PyGreSQL connection."""
return SteadyPgConnection(
self._maxusage, self._setsession, self._closeable,
*self._args, **self._kwargs)
|
Get a steady, non-persistent PyGreSQL connection.
|
def post_optimization_step(self, batch_info, device, model, rollout):
""" Steps to take after optimization has been done"""
if batch_info.aggregate_batch_number % self.target_update_frequency == 0:
self.target_model.load_state_dict(model.state_dict())
self.target_model.eval()
|
Steps to take after optimization has been done
|
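A minimal standalone sketch of the hard target-network update pattern the method implements; the linear model is a stand-in for any torch module:

import copy
import torch.nn as nn

model = nn.Linear(4, 2)
target_model = copy.deepcopy(model)

def maybe_sync(batch_number, frequency=1000):
    # Hard update: copy the online weights into the frozen target network.
    if batch_number % frequency == 0:
        target_model.load_state_dict(model.state_dict())
        target_model.eval()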
def zonal_mean_column(num_lat=90, num_lev=30, water_depth=10., lat=None,
lev=None, **kwargs):
"""Creates two Domains with one water cell, a latitude axis and
a level/height axis.
* SlabOcean: one water cell and a latitude axis above
(similar to :func:`zonal_mean_surface`)
* Atmosphere: a latitude axis and a level/height axis (two dimensional)
**Function-call argument** \n
:param int num_lat: number of latitude points on the axis
[default: 90]
:param int num_lev: number of pressure levels
(evenly spaced from surface to TOA) [default: 30]
:param float water_depth: depth of the water cell (slab ocean) [default: 10.]
:param lat: specification for latitude axis (optional)
:type lat: :class:`~climlab.domain.axis.Axis` or latitude array
:param lev: specification for height axis (optional)
:type lev: :class:`~climlab.domain.axis.Axis` or pressure array
:raises: :exc:`ValueError` if `lat` is given but neither Axis nor latitude array.
:raises: :exc:`ValueError` if `lev` is given but neither Axis nor pressure array.
:returns: a list of 2 Domain objects (slab ocean, atmosphere)
:rtype: :py:class:`list` of :class:`SlabOcean`, :class:`Atmosphere`
:Example:
::
>>> from climlab import domain
>>> sfc, atm = domain.zonal_mean_column(num_lat=36,num_lev=10)
>>> print sfc
climlab Domain object with domain_type=ocean and shape=(36, 1)
>>> print atm
climlab Domain object with domain_type=atm and shape=(36, 10)
"""
if lat is None:
latax = Axis(axis_type='lat', num_points=num_lat)
elif isinstance(lat, Axis):
latax = lat
else:
try:
latax = Axis(axis_type='lat', points=lat)
except Exception:
raise ValueError('lat must be Axis object or latitude array')
if lev is None:
levax = Axis(axis_type='lev', num_points=num_lev)
elif isinstance(lev, Axis):
levax = lev
else:
try:
levax = Axis(axis_type='lev', points=lev)
except Exception:
raise ValueError('lev must be Axis object or pressure array')
depthax = Axis(axis_type='depth', bounds=[water_depth, 0.])
#axes = {'depth': depthax, 'lat': latax, 'lev': levax}
slab = SlabOcean(axes={'lat':latax, 'depth':depthax}, **kwargs)
atm = Atmosphere(axes={'lat':latax, 'lev':levax}, **kwargs)
return slab, atm
|
Creates two Domains with one water cell, a latitude axis and
a level/height axis.
* SlabOcean: one water cell and a latitude axis above
(similar to :func:`zonal_mean_surface`)
* Atmosphere: a latitude axis and a level/height axis (two dimensional)
**Function-call argument** \n
:param int num_lat: number of latitude points on the axis
[default: 90]
:param int num_lev: number of pressure levels
(evenly spaced from surface to TOA) [default: 30]
:param float water_depth: depth of the water cell (slab ocean) [default: 10.]
:param lat: specification for latitude axis (optional)
:type lat: :class:`~climlab.domain.axis.Axis` or latitude array
:param lev: specification for height axis (optional)
:type lev: :class:`~climlab.domain.axis.Axis` or pressure array
:raises: :exc:`ValueError` if `lat` is given but neither Axis nor latitude array.
:raises: :exc:`ValueError` if `lev` is given but neither Axis nor pressure array.
:returns: a list of 2 Domain objects (slab ocean, atmosphere)
:rtype: :py:class:`list` of :class:`SlabOcean`, :class:`Atmosphere`
:Example:
::
>>> from climlab import domain
>>> sfc, atm = domain.zonal_mean_column(num_lat=36,num_lev=10)
>>> print sfc
climlab Domain object with domain_type=ocean and shape=(36, 1)
>>> print atm
climlab Domain object with domain_type=atm and shape=(36, 10)
|