# NOTE(review): the original file began with dataset-extraction artifact lines
# ("input stringlengths ... | output ...", a markdown table fragment, and a
# "<gh_stars>0" marker). They are not Python and not part of the source;
# preserved here as a comment only so the file parses.
# -*- coding: utf-8 -*-
from toshi.handlers import BaseHandler
from toshi.errors import JSONHTTPError
from toshi.jsonrpc.errors import JsonRPCInternalError
from toshi.database import DatabaseMixin
from toshi.ethereum.mixin import EthereumMixin
from toshi.jsonrpc.errors import JsonRPCError
from toshi.redis import RedisMixin
from toshi.analytics import AnalyticsMixin
from toshi.sofa import SofaPayment
from toshi.handlers import RequestVerificationMixin, SimpleFileHandler
from toshi.utils import validate_address, parse_int
from toshi.log import log, log_headers_on_error
from .mixins import BalanceMixin
from .jsonrpc import ToshiEthJsonRPC
from .utils import database_transaction_to_rlp_transaction
from toshi.ethereum.tx import transaction_to_json, DEFAULT_GASPRICE
from tornado.escape import json_encode
class TokenHandler(DatabaseMixin, SimpleFileHandler):
    """Serves token metadata and icons.

    ``GET /token/<SYMBOL>.png`` returns the token's PNG icon;
    ``GET /token`` returns a JSON listing of all known tokens.
    """

    async def get(self, symbol_png=None):
        if symbol_png:
            # remove the ".png" suffix required by the URL regex
            symbol = symbol_png[:-4]
            async with self.db:
                row = await self.db.fetchrow(
                    "SELECT * FROM tokens WHERE symbol = $1",
                    symbol
                )
            if row is None:
                # BUG FIX: this previously raised `HTTPError(404)`, but
                # `HTTPError` is never imported in this module and would have
                # produced a NameError; use JSONHTTPError like every other
                # handler in this file.
                raise JSONHTTPError(404, body={'errors': [{'id': 'not_found', 'message': 'Not Found'}]})
            await self.handle_file_response(
                data=row['icon'],
                content_type="image/png",
                etag=row['hash'],
                last_modified=row['last_modified']
            )
        else:
            # list available tokens
            async with self.db:
                rows = await self.db.fetch(
                    "SELECT symbol, name, decimals FROM tokens "
                    "ORDER BY symbol ASC"
                )
            tokens = [dict(symbol=r['symbol'],
                           name=r['name'],
                           decimals=r['decimals'],
                           icon_url="/token/{}.png".format(r['symbol']))
                      for r in rows]
            self.write({"tokens": tokens})
class BalanceHandler(DatabaseMixin, EthereumMixin, BaseHandler):
    """Returns the balance of an ethereum address via the JSON-RPC layer."""

    async def get(self, address):
        # This endpoint is public and read-only, so CORS is wide open.
        for header, value in (("Access-Control-Allow-Origin", "*"),
                              ("Access-Control-Allow-Headers", "x-requested-with"),
                              ("Access-Control-Allow-Methods", "GET")):
            self.set_header(header, value)
        rpc = ToshiEthJsonRPC(None, self.application, self.request)
        try:
            balance = await rpc.get_balance(address)
        except JsonRPCError as e:
            raise JSONHTTPError(400, body={'errors': [e.data]})
        self.write(balance)
class TransactionSkeletonHandler(EthereumMixin, RedisMixin, BaseHandler):
    """Builds an unsigned transaction "skeleton" from loosely-named JSON input.

    Accepts several historical spellings for the same fields
    (``from``/``from_address``, ``to``/``to_address``,
    ``gasPrice``/``gasprice``/``gas_price``, ``startgas``/``gas``) and
    forwards the normalized keyword arguments to
    ``create_transaction_skeleton``.
    """

    async def post(self):
        try:
            # normalize inputs
            if 'from' in self.json:
                self.json['from_address'] = self.json.pop('from')
            if 'to' in self.json:
                self.json['to_address'] = self.json.pop('to')
            elif 'to_address' not in self.json:
                # no recipient given at all — presumably a contract
                # deployment; confirm against create_transaction_skeleton
                self.json['to_address'] = None
            # the following are to deal with different representations
            # of the same concept from different places
            if 'gasPrice' in self.json:
                self.json['gas_price'] = self.json.pop('gasPrice')
            if 'gasprice' in self.json:
                self.json['gas_price'] = self.json.pop('gasprice')
            if 'startgas' in self.json:
                self.json['gas'] = self.json.pop('startgas')
            result = await ToshiEthJsonRPC(None, self.application, self.request).create_transaction_skeleton(**self.json)
        except JsonRPCError as e:
            log.warning("/tx/skel failed: " + json_encode(e.data) + "\" -> arguments: " + json_encode(self.json) + "\"")
            raise JSONHTTPError(400, body={'errors': [e.data]})
        except TypeError:
            # raised when the caller supplied keyword arguments that
            # create_transaction_skeleton does not accept
            log.warning("/tx/skel failed: bad arguments \"" + json_encode(self.json) + "\"")
            raise JSONHTTPError(400, body={'errors': [{'id': 'bad_arguments', 'message': 'Bad Arguments'}]})
        self.write(result)
class SendTransactionHandler(BalanceMixin, EthereumMixin, DatabaseMixin, RedisMixin, RequestVerificationMixin, BaseHandler):
    """Relays a signed transaction; responds with the resulting tx hash."""

    async def post(self):
        # Signed requests identify the sender; anonymous sends are allowed too.
        sender_toshi_id = self.verify_request() if self.is_request_signed() else None
        rpc = ToshiEthJsonRPC(sender_toshi_id, self.application, self.request)
        try:
            tx_hash = await rpc.send_transaction(**self.json)
        except JsonRPCInternalError as e:
            raise JSONHTTPError(500, body={'errors': [e.data]})
        except JsonRPCError as e:
            raise JSONHTTPError(400, body={'errors': [e.data]})
        except TypeError:
            # unexpected keyword arguments in the request body
            raise JSONHTTPError(400, body={'errors': [{'id': 'bad_arguments', 'message': 'Bad Arguments'}]})
        self.write({"tx_hash": tx_hash})
class TransactionHandler(EthereumMixin, DatabaseMixin, BaseHandler):
    """Looks up a transaction by hash.

    ``?format=rpc`` (default) returns the node's JSON view of the
    transaction; ``?format=sofa`` renders it as a SOFA payment message,
    falling back to the local ``transactions`` table when the node no
    longer knows the transaction.
    """

    async def get(self, tx_hash):
        self.set_header("Access-Control-Allow-Origin", "*")
        self.set_header("Access-Control-Allow-Headers", "x-requested-with")
        self.set_header('Access-Control-Allow-Methods', 'GET')
        # NOTE: `format` shadows the builtin, kept as-is for compatibility
        format = self.get_query_argument('format', 'rpc').lower()
        try:
            tx = await ToshiEthJsonRPC(None, self.application, self.request).get_transaction(tx_hash)
        except JsonRPCError as e:
            raise JSONHTTPError(400, body={'errors': [e.data]})
        # for the sofa format we can still answer from the local database,
        # so only 404 immediately for the rpc format
        if tx is None and format != 'sofa':
            raise JSONHTTPError(404, body={'error': [{'id': 'not_found', 'message': 'Not Found'}]})
        if format == 'sofa':
            async with self.db:
                row = await self.db.fetchrow(
                    "SELECT * FROM transactions where hash = $1 ORDER BY transaction_id DESC",
                    tx_hash)
            if row is None:
                raise JSONHTTPError(404, body={'error': [{'id': 'not_found', 'message': 'Not Found'}]})
            if tx is None:
                # node doesn't know the tx: rebuild it from the stored row
                tx = transaction_to_json(database_transaction_to_rlp_transaction(row))
            if row['status'] == 'error':
                tx['error'] = True
            payment = SofaPayment.from_transaction(tx, networkId=self.application.config['ethereum']['network_id'])
            message = payment.render()
            self.set_header('Content-Type', 'text/plain')
            self.write(message.encode('utf-8'))
        else:
            self.write(tx)
class CancelTransactionHandler(EthereumMixin, DatabaseMixin, BaseHandler):
    """Cancels a queued (not yet broadcast) transaction, given its signature."""

    async def post(self):
        self.set_header("Access-Control-Allow-Origin", "*")
        self.set_header("Access-Control-Allow-Headers", "x-requested-with")
        self.set_header('Access-Control-Allow-Methods', 'POST')
        payload = self.json
        # both fields are mandatory
        if 'tx_hash' not in payload or 'signature' not in payload:
            raise JSONHTTPError(400, body={'errors': [{'id': 'bad_arguments', 'message': 'Bad Arguments'}]})
        rpc = ToshiEthJsonRPC(None, self.application, self.request)
        try:
            await rpc.cancel_queued_transaction(payload['tx_hash'], payload['signature'])
        except JsonRPCError as e:
            raise JSONHTTPError(400, body={'errors': [e.data]})
        self.set_status(204)
class AddressHandler(DatabaseMixin, BaseHandler):
    """Paginated transaction history for an address.

    Query parameters: ``offset``/``limit`` for paging, repeatable
    ``status`` (confirmed/unconfirmed/queued/error) and ``direction``
    (in/out) filters, and ``order`` (asc/desc) on creation time.
    """

    async def get(self, address):
        self.set_header("Access-Control-Allow-Origin", "*")
        self.set_header("Access-Control-Allow-Headers", "x-requested-with")
        self.set_header('Access-Control-Allow-Methods', 'GET')
        offset = parse_int(self.get_argument('offset', '0'))
        limit = parse_int(self.get_argument('limit', '10'))
        # repeatable query args, normalized to lowercase sets
        status = set([s.lower() for s in self.get_arguments('status')])
        direction = set([d.lower() for d in self.get_arguments('direction')])
        order = self.get_argument('order', 'desc').upper()
        # validate everything up front; parse_int returns None on bad input
        if not validate_address(address) or \
           offset is None or \
           limit is None or \
           (status and not status.issubset(['confirmed', 'unconfirmed', 'queued', 'error'])) or \
           (direction and not direction.issubset(['in', 'out'])) or \
           (order not in ['DESC', 'ASC']):
            raise JSONHTTPError(400, body={'id': 'bad_arguments', 'message': 'Bad Arguments'})
        # Build the query incrementally; placeholder numbers track len(args),
        # so the order of appends below matters.
        query = "SELECT * FROM transactions WHERE "
        args = [address, offset, limit]
        # direction filter: empty or both values means no filtering
        if len(direction) == 0 or len(direction) == 2:
            query += "(from_address = $1 OR to_address = $1) "
        elif 'in' in direction:
            query += "to_address = $1 "
        elif 'out' in direction:
            query += "from_address = $1 "
        if len(status) == 0:
            # default view hides errored transactions
            query += "AND (status != $4 OR status IS NULL) "
            args.append('error')
        else:
            status_query = []
            for s in status:
                if s == 'queued':
                    # 'queued' is stored as NULL as well as 'queued'
                    status_query.extend(["status = ${}".format(len(args) + 1), "status IS NULL"])
                else:
                    status_query.append("status = ${}".format(len(args) + 1))
                args.append(s)
            query += "AND (" + " OR ".join(status_query) + ") "
        # `order` is validated above to be DESC/ASC, so interpolation is safe
        query += "ORDER BY created {} OFFSET $2 LIMIT $3".format(order)
        async with self.db:
            rows = await self.db.fetch(query, *args)
        transactions = []
        for row in rows:
            transactions.append({
                "hash": row['hash'],
                "to": row['to_address'],
                "from": row['from_address'],
                "nonce": hex(row['nonce']),
                "value": row['value'],
                "gas": row['gas'],
                "gas_price": row['gas_price'],
                "created_data": row['created'].isoformat(),
                "confirmed_data": row['updated'].isoformat() if row['blocknumber'] else None,
                "status": row['status'] if row['status'] is not None else 'queued',
                "data": row['data']
            })
        resp = {
            "transactions": transactions,
            "offset": offset,
            "limit": limit,
            "order": order
        }
        # echo the filters back to the caller
        if len(direction) == 1:
            resp['direction'] = direction.pop()
        if status:
            resp['status'] = "&".join(status)
        self.write(resp)
class GasPriceHandler(RedisMixin, BaseHandler):
    """Returns the current standard gas price as a hex string."""

    def get(self):
        self.set_header("Access-Control-Allow-Origin", "*")
        self.set_header("Access-Control-Allow-Headers", "x-requested-with")
        self.set_header('Access-Control-Allow-Methods', 'GET')
        # Prefer the cached gas-station value; fall back to the configured default.
        gas_price = self.redis.get('gas_station_standard_gas_price')
        if gas_price is None:
            configured = self.application.config['ethereum'].getint('default_gasprice', DEFAULT_GASPRICE)
            gas_price = hex(configured)
        self.write({"gas_price": gas_price})
class PNRegistrationHandler(RequestVerificationMixin, DatabaseMixin, BaseHandler):
    """Registers a device for push notifications about an ethereum address.

    POST body must contain ``registration_id``; ``address`` is optional
    and defaults to the authenticated ``toshi_id``. Also migrates legacy
    placeholder registrations to the new format.
    """

    @log_headers_on_error
    async def post(self, service):
        toshi_id = self.verify_request()
        payload = self.json
        if 'registration_id' not in payload:
            raise JSONHTTPError(400, body={'errors': [{'id': 'bad_arguments', 'message': 'Bad Arguments'}]})
        # TODO: registration id verification
        # XXX: BACKWARDS COMPAT FOR OLD PN REGISTRATION
        # remove when no longer needed
        if 'address' not in payload:
            async with self.db:
                legacy = await self.db.fetch(
                    "SELECT eth_address FROM notification_registrations "
                    "WHERE toshi_id = $1 AND service = 'LEGACY' AND registration_id = 'LEGACY'",
                    toshi_id)
        else:
            legacy = False
        if legacy:
            # migrate each legacy row to the new (service, registration_id)
            # pair, then drop the legacy placeholders
            async with self.db:
                for row in legacy:
                    eth_address = row['eth_address']
                    await self.db.execute(
                        "INSERT INTO notification_registrations (toshi_id, service, registration_id, eth_address) "
                        "VALUES ($1, $2, $3, $4) ON CONFLICT (toshi_id, service, registration_id, eth_address) DO NOTHING",
                        toshi_id, service, payload['registration_id'], eth_address)
                await self.db.execute(
                    "DELETE FROM notification_registrations "
                    "WHERE toshi_id = $1 AND service = 'LEGACY' AND registration_id = 'LEGACY'", toshi_id)
                await self.db.commit()
        else:
            # eth address verification (default to toshi_id if eth_address is not supplied)
            eth_address = payload['address'] if 'address' in payload else toshi_id
            if not validate_address(eth_address):
                # BUG FIX: was `JSONHTTPError(data={...})` — no status code
                # and a keyword no other call site uses; use the module's
                # standard 400/body form.
                raise JSONHTTPError(400, body={'errors': [{'id': 'bad_arguments', 'message': 'Bad Arguments'}]})
            async with self.db:
                await self.db.execute(
                    "INSERT INTO notification_registrations (toshi_id, service, registration_id, eth_address) "
                    "VALUES ($1, $2, $3, $4) ON CONFLICT (toshi_id, service, registration_id, eth_address) DO NOTHING",
                    toshi_id, service, payload['registration_id'], eth_address)
                # XXX: temporary fix for old ios versions sending their payment address as toshi_id
                # should be removed after enough time has passed that most people should be using the fixed version
                if eth_address != toshi_id:
                    # remove any apn registrations where toshi_id == eth_address for this eth_address
                    await self.db.execute(
                        "DELETE FROM notification_registrations "
                        "WHERE toshi_id = $1 AND eth_address = $1 AND service = 'apn'", eth_address)
                await self.db.commit()
        self.set_status(204)
class PNDeregistrationHandler(RequestVerificationMixin, AnalyticsMixin, DatabaseMixin, BaseHandler):
    """Removes a push notification registration.

    POST body must contain ``registration_id``; if ``address`` is supplied
    only that address is deregistered, otherwise every address registered
    under this (toshi_id, service, registration_id) is removed.
    """

    async def post(self, service):
        toshi_id = self.verify_request()
        payload = self.json
        if 'registration_id' not in payload:
            raise JSONHTTPError(400, body={'errors': [{'id': 'bad_arguments', 'message': 'Bad Arguments'}]})
        # TODO: registration id verification
        # eth address verification (if none is supplied, delete all the matching addresses)
        eth_address = payload.get('address', None)
        if eth_address and not validate_address(eth_address):
            # BUG FIX: was `JSONHTTPError(data={...})` — no status code and a
            # keyword no other call site uses; use the standard 400/body form.
            raise JSONHTTPError(400, body={'errors': [{'id': 'bad_arguments', 'message': 'Bad Arguments'}]})
        async with self.db:
            args = [toshi_id, service, payload['registration_id']]
            if eth_address:
                args.append(eth_address)
            # BUG FIX: the optional clause previously lacked a leading space,
            # producing "... registration_id = $3AND eth_address = $4" —
            # invalid SQL whenever an address was supplied.
            await self.db.execute(
                "DELETE FROM notification_registrations WHERE toshi_id = $1 AND service = $2 AND registration_id = $3{}".format(
                    " AND eth_address = $4" if eth_address else ""),
                *args)
            await self.db.commit()
        self.set_status(204)
        self.track(toshi_id, "Deregistered ETH notifications")
class StatusHandler(RedisMixin, BaseHandler):
    """Health-check endpoint backed by the monitor's sanity-check flag."""

    def get(self):
        # NOTE(review): this assumes the redis client decodes responses to
        # str; if it returned bytes the comparison below would never match —
        # confirm the RedisMixin configuration.
        sanity = self.redis.get("monitor_sanity_check_ok")
        self.write("OK" if sanity == "OK" else "MONITOR SANITY CHECK FAILED")
class LegacyRegistrationHandler(RequestVerificationMixin, DatabaseMixin, BaseHandler):
"""backwards compatibility for old pn registration"""
async def post(self):
toshi_id = self.verify_request()
payload = self.json
if 'addresses' not in payload or len(payload['addresses']) == 0:
raise JSONHTTPError(400, body={'errors': [{'id': 'bad_arguments', 'message': 'Bad Arguments'}]})
addresses = payload['addresses']
for address in addresses:
if not validate_address(address):
raise JSONHTTPError(400, body={'errors': [{'id': 'bad_arguments', 'message': 'Bad Arguments'}]})
async with self.db:
# see if this toshi_id is already registered, listening to it's own toshi_id
rows = await self.db.fetch("SELECT * FROM | |
will return the
standard deviation of the data.
- If the original lightcurve contains a quality attribute, then the
bitwise OR of the quality flags will be returned per bin.
"""
available_methods = ['mean', 'median']
if method not in available_methods:
raise ValueError("method must be one of: {}".format(available_methods))
methodf = np.__dict__['nan' + method]
n_bins = self.flux.size // binsize
binned_lc = self.copy()
indexes = np.array_split(np.arange(len(self.time)), n_bins)
binned_lc.time = np.array([methodf(self.time[a]) for a in indexes])
binned_lc.flux = np.array([methodf(self.flux[a]) for a in indexes])
if np.any(np.isfinite(self.flux_err)):
# root-mean-square error
binned_lc.flux_err = np.array(
[np.sqrt(np.nansum(self.flux_err[a]**2))
for a in indexes]
) / binsize
else:
# Make them zeros.
binned_lc.flux_err = np.zeros(len(binned_lc.flux))
if hasattr(binned_lc, 'quality'):
# Note: np.bitwise_or only works if there are no NaNs
binned_lc.quality = np.array(
[np.bitwise_or.reduce(a) if np.all(np.isfinite(a)) else np.nan
for a in np.array_split(self.quality, n_bins)])
if hasattr(binned_lc, 'cadenceno'):
binned_lc.cadenceno = np.array([np.nan] * n_bins)
if hasattr(binned_lc, 'centroid_col'):
# Note: nanmean/nanmedian yield a RuntimeWarning if a slice is all NaNs
binned_lc.centroid_col = np.array(
[methodf(a) if np.any(np.isfinite(a)) else np.nan
for a in np.array_split(self.centroid_col, n_bins)])
if hasattr(binned_lc, 'centroid_row'):
binned_lc.centroid_row = np.array(
[methodf(a) if np.any(np.isfinite(a)) else np.nan
for a in np.array_split(self.centroid_row, n_bins)])
return binned_lc
def cdpp(self, **kwargs):
"""DEPRECATED: use `estimate_cdpp()` instead."""
warnings.warn('`LightCurve.cdpp()` is deprecated and will be '
'removed in Lightkurve v1.0.0, '
'please use `LightCurve.estimate_cdpp()` instead.',
LightkurveWarning)
return self.estimate_cdpp(**kwargs)
def estimate_cdpp(self, transit_duration=13, savgol_window=101,
savgol_polyorder=2, sigma=5.):
"""Estimate the CDPP noise metric using the Savitzky-Golay (SG) method.
A common estimate of the noise in a lightcurve is the scatter that
remains after all long term trends have been removed. This is the idea
behind the Combined Differential Photometric Precision (CDPP) metric.
The official Kepler Pipeline computes this metric using a wavelet-based
algorithm to calculate the signal-to-noise of the specific waveform of
transits of various durations. In this implementation, we use the
simpler "sgCDPP proxy algorithm" discussed by Gilliland et al
(2011ApJS..197....6G) and Van Cleve et al (2016PASP..128g5002V).
The steps of this algorithm are:
1. Remove low frequency signals using a Savitzky-Golay filter with
window length `savgol_window` and polynomial order `savgol_polyorder`.
2. Remove outliers by rejecting data points which are separated from
the mean by `sigma` times the standard deviation.
3. Compute the standard deviation of a running mean with
a configurable window length equal to `transit_duration`.
We use a running mean (as opposed to block averaging) to strongly
attenuate the signal above 1/transit_duration whilst retaining
the original frequency sampling. Block averaging would set the Nyquist
limit to 1/transit_duration.
Parameters
----------
transit_duration : int, optional
The transit duration in units of number of cadences. This is the
length of the window used to compute the running mean. The default
is 13, which corresponds to a 6.5 hour transit in data sampled at
30-min cadence.
savgol_window : int, optional
Width of Savitsky-Golay filter in cadences (odd number).
Default value 101 (2.0 days in Kepler Long Cadence mode).
savgol_polyorder : int, optional
Polynomial order of the Savitsky-Golay filter.
The recommended value is 2.
sigma : float, optional
The number of standard deviations to use for clipping outliers.
The default is 5.
Returns
-------
cdpp : float
Savitzky-Golay CDPP noise metric in units parts-per-million (ppm).
Notes
-----
This implementation is adapted from the Matlab version used by
<NAME> but lacks the normalization factor used there:
svn+ssh://murzim/repo/so/trunk/Develop/jvc/common/compute_SG_noise.m
"""
if not isinstance(transit_duration, int):
raise ValueError("transit_duration must be an integer in units "
"number of cadences, got {}.".format(transit_duration))
detrended_lc = self.flatten(window_length=savgol_window,
polyorder=savgol_polyorder)
cleaned_lc = detrended_lc.remove_outliers(sigma=sigma)
mean = running_mean(data=cleaned_lc.flux, window_size=transit_duration)
cdpp_ppm = np.std(mean) * 1e6
return cdpp_ppm
    def _create_plot(self, method='plot', ax=None, normalize=True,
                     xlabel=None, ylabel=None, title='', style='lightkurve',
                     show_colorbar=True, colorbar_label='',
                     **kwargs):
        """Implements `plot()`, `scatter()`, and `errorbar()` to avoid code duplication.

        Dispatches on `method` ('plot', 'scatter', or 'errorbar'), applying
        shared defaults for labels, normalization, and styling.

        NOTE(review): the `title` parameter is accepted but never applied to
        the axes anywhere in this method — confirm whether that is intended.

        Returns
        -------
        ax : matplotlib.axes._subplots.AxesSubplot
            The matplotlib axes object.
        """
        # Configure the default style
        if style is None or style == 'lightkurve':
            style = MPLSTYLE
        # Default xlabel depends on this light curve's time format
        if xlabel is None:
            if self.time_format == 'bkjd':
                xlabel = 'Time - 2454833 [BKJD days]'
            elif self.time_format == 'btjd':
                xlabel = 'Time - 2457000 [BTJD days]'
            elif self.time_format == 'jd':
                xlabel = 'Time [JD]'
            else:
                xlabel = 'Time'
        # Default ylabel
        if ylabel is None:
            if normalize:
                ylabel = 'Normalized Flux'
            else:
                ylabel = 'Flux [e$^-$s$^{-1}$]'
        # Default legend label
        if ('label' not in kwargs):
            kwargs['label'] = self.label
        # Normalize the data if requested
        if normalize:
            lc_normed = self.normalize()
            flux, flux_err = lc_normed.flux, lc_normed.flux_err
        else:
            flux, flux_err = self.flux, self.flux_err
        # Make the plot
        with plt.style.context(style):
            if ax is None:
                fig, ax = plt.subplots(1)
            if method == 'scatter':
                sc = ax.scatter(self.time, flux, **kwargs)
                # Colorbars should only be plotted if the user specifies, and there is
                # a color specified that is not a string (e.g. 'C1') and is iterable.
                if show_colorbar and ('c' in kwargs) and \
                   (not isinstance(kwargs['c'], str)) and hasattr(kwargs['c'], '__iter__'):
                    cbar = plt.colorbar(sc, ax=ax)
                    cbar.set_label(colorbar_label)
                    cbar.ax.yaxis.set_tick_params(tick1On=False, tick2On=False)
                    cbar.ax.minorticks_off()
            elif method == 'errorbar':
                ax.errorbar(x=self.time, y=flux, yerr=flux_err, **kwargs)
            else:
                # default: a plain line plot
                ax.plot(self.time, flux, **kwargs)
            ax.set_xlabel(xlabel)
            ax.set_ylabel(ylabel)
            # Show the legend if labels were set
            legend_labels = ax.get_legend_handles_labels()
            if (np.sum([len(a) for a in legend_labels]) != 0):
                ax.legend()
        return ax
def plot(self, **kwargs):
"""Plot the light curve using Matplotlib's `~matplotlib.pyplot.plot()` method.
Parameters
----------
ax : matplotlib.axes._subplots.AxesSubplot
A matplotlib axes object to plot into. If no axes is provided,
a new one will be generated.
normalize : bool
Normalize the lightcurve before plotting?
xlabel : str
Plot x axis label
ylabel : str
Plot y axis label
title : str
Plot set_title
style : str
Path or URL to a matplotlib style file, or name of one of
matplotlib's built-in stylesheets (e.g. 'ggplot').
Lightkurve's custom stylesheet is used by default.
kwargs : dict
Dictionary of arguments to be passed to `matplotlib.pyplot.plot`.
Returns
-------
ax : matplotlib.axes._subplots.AxesSubplot
The matplotlib axes object.
"""
return self._create_plot(method='plot', **kwargs)
def scatter(self, colorbar_label='', show_colorbar=True, **kwargs):
"""Plots the light curve using Matplotlib's `~matplotlib.pyplot.scatter()` method.
Parameters
----------
ax : matplotlib.axes._subplots.AxesSubplot
A matplotlib axes object to plot into. If no axes is provided,
a new one will be generated.
normalize : bool
Normalize the lightcurve before plotting?
xlabel : str
Plot x axis label
ylabel : str
Plot y axis label
title : str
Plot set_title
style : str
Path or URL to a matplotlib style file, or name of one of
matplotlib's built-in stylesheets (e.g. 'ggplot').
Lightkurve's custom stylesheet is used by default.
colorbar_label : str
Label to show next to the colorbar (if `c` is given).
show_colorbar : boolean
Show the colorbar if colors are given using the `c` argument?
kwargs : dict
Dictionary of arguments to be passed to `matplotlib.pyplot.scatter`.
Returns
-------
ax : matplotlib.axes._subplots.AxesSubplot
The matplotlib axes object.
"""
return self._create_plot(method='scatter', colorbar_label=colorbar_label,
show_colorbar=show_colorbar, **kwargs)
    def errorbar(self, linestyle='', **kwargs):
        """Plots the light curve using Matplotlib's `~matplotlib.pyplot.errorbar()` method.

        Parameters
        ----------
        ax : matplotlib.axes._subplots.AxesSubplot
            A matplotlib axes object to plot into. If no axes is provided,
            a new one will be generated.
        normalize : bool
            Normalize the lightcurve before plotting?
        xlabel : str
            Plot x axis label
        ylabel : str
            Plot y axis label
        title : str
            Plot set_title
        style : str
            Path or URL to a matplotlib style file, or name of one of
            matplotlib's built-in stylesheets (e.g. 'ggplot').
            Lightkurve's custom stylesheet is used by default.
        linestyle : str
            Connect the error bars using a line?
        kwargs : dict
            Dictionary of arguments to be passed to `matplotlib.pyplot.errorbar`.

        Returns
        -------
        ax : matplotlib.axes._subplots.AxesSubplot
            The matplotlib axes object.
        """
        # Apply the `linestyle` parameter unless the caller used the `ls`
        # alias (default '' draws unconnected error bars).
        if 'ls' not in kwargs:
            kwargs['linestyle'] = linestyle
        return self._create_plot(method='errorbar', **kwargs)
def interact_bls(self, notebook_url='localhost:8888', minimum_period=None,
maximum_period=None, resolution=2000):
"""Display an interactive Jupyter Notebook widget to find planets.
The Box Least Squares (BLS) periodogram is a statistical tool used
for detecting transiting exoplanets and eclipsing binaries in
light curves. This method will display a Jupyter Notebook Widget
which enables the BLS algorithm to be used interactively.
Behind the scenes, the widget uses the AstroPy implementation of BLS [1]_.
This feature only works inside an active Jupyter Notebook.
It requires | |
def test_no_invalid_return_scp(self):
"""Test that invalid role selection values aren't returned."""
# If the SCU proposes 0x00 we can't return 0x01
rq = build_context("1.2.3.4")
rq_roles = {"1.2.3.4": (True, False)}
ac = build_context("1.2.3.4")
ac.scu_role = True
ac.scp_role = True
result, roles = negotiate_as_acceptor([rq], [ac], rq_roles)
assert roles[0].sop_class_uid == "1.2.3.4"
assert roles[0].scu_role == True
assert roles[0].scp_role == False
def test_no_invalid_return_scu(self):
"""Test that invalid role selection values aren't returned."""
# If the SCU proposes 0x00 we can't return 0x01
rq = build_context("1.2.3.4")
rq_roles = {"1.2.3.4": (False, True)}
ac = build_context("1.2.3.4")
ac.scu_role = True
ac.scp_role = True
result, roles = negotiate_as_acceptor([rq], [ac], rq_roles)
assert roles[0].sop_class_uid == "1.2.3.4"
assert roles[0].scu_role == False
assert roles[0].scp_role == True
    @pytest.mark.parametrize("req, acc, out", REFERENCE_ROLES)
    def test_combination_role(self, req, acc, out):
        """Test that a combination of results works correctly.

        Builds a full mix of contexts covering every acceptance/rejection
        result code, with and without SCP/SCU role selection, and checks
        the acceptor's negotiated results.
        """
        # No role selection
        rq_contexts = []
        ac_contexts = []
        # 0x00 - accepted
        rq_contexts.append(build_context("1.1.1"))
        rq_contexts.append(build_context("1.1.2"))
        rq_contexts.append(build_context("1.1.2"))
        rq_contexts.append(build_context("1.1.3"))
        ac_contexts.append(build_context("1.1.1"))
        ac_contexts.append(build_context("1.1.2"))
        ac_contexts.append(build_context("1.1.3"))
        # 0x01 - user rejected - only achievable with role selection
        # 0x02 - provider rejected - not achievable as acceptor
        # 0x03 - abstract syntax not supported
        rq_contexts.append(build_context("1.4.1"))
        rq_contexts.append(build_context("1.4.2"))
        rq_contexts.append(build_context("1.4.2"))
        rq_contexts.append(build_context("1.4.3"))
        # 0x04 - transfer syntax not supported
        rq_contexts.append(build_context("1.5.1", "1.2"))
        rq_contexts.append(build_context("1.5.2", "1.2"))
        rq_contexts.append(build_context("1.5.2", "1.2"))
        rq_contexts.append(build_context("1.5.3", "1.2"))
        ac_contexts.append(build_context("1.5.1"))
        ac_contexts.append(build_context("1.5.2"))
        ac_contexts.append(build_context("1.5.3"))
        # Role selection
        rq_roles = {}
        # 0x00 - accepted and 0x01 - user rejected
        for uid in ["2.1.1", "2.1.2", "2.1.2", "2.1.3"]:
            rq_contexts.append(build_context(uid))
            rq_roles[uid] = (req[0], req[1])
            cx = build_context(uid)
            cx.scu_role = acc[0]
            cx.scp_role = acc[1]
            ac_contexts.append(cx)
        # 0x03 - abstract syntax not supported
        for uid in ["2.4.1", "2.4.2", "2.4.2", "2.4.3"]:
            rq_contexts.append(build_context(uid))
            rq_roles[uid] = (req[0], req[1])
        # 0x04 - transfer syntax not supported
        for uid in ["2.5.1", "2.5.2", "2.5.2", "2.5.3"]:
            rq_contexts.append(build_context(uid, "1.2"))
            rq_roles[uid] = (req[0], req[1])
            cx = build_context(uid)
            cx.scu_role = acc[0]
            cx.scp_role = acc[1]
            ac_contexts.append(cx)
        # assign odd context IDs (1, 3, 5, ...) as DICOM requires
        for ii, cx in enumerate(rq_contexts):
            cx.context_id = (ii + 1) * 2 - 1
        results, roles = negotiate_as_acceptor(rq_contexts, ac_contexts, rq_roles)
        # bucket results by result code for the counts below
        out_00 = [cx for cx in results if cx.result == 0x00]
        out_01 = [cx for cx in results if cx.result == 0x01]
        out_02 = [cx for cx in results if cx.result == 0x02]
        out_03 = [cx for cx in results if cx.result == 0x03]
        out_04 = [cx for cx in results if cx.result == 0x04]
        out_na = [cx for cx in results if cx.result is None]
        # accepted contexts that went through role selection ("2.x.x" UIDs)
        out_00_role = [cx for cx in out_00 if cx.abstract_syntax[0] == "2"]
        # Unique UIDs with role selection
        out_00_uids = set([cx.abstract_syntax for cx in out_00_role])
        # If acceptor has None as role then no SCP/SCU role response
        if None not in acc:
            assert len(out_00_uids) == len(roles)
        # Raw results
        if out == CONTEXT_REJECTED:
            assert len(out_01) == 4
            assert len(out_00) == 4
        else:
            assert len(out_00) == 8
            assert len(out_01) == 0
        # Always
        assert len(out_02) == 0
        assert len(out_03) == 8
        assert len(out_04) == 8
        assert len(out_na) == 0
        # Test individual results
        assert out_00[0].abstract_syntax == "1.1.1"
        assert out_00[1].abstract_syntax == "1.1.2"
        assert out_00[2].abstract_syntax == "1.1.2"
        assert out_00[3].abstract_syntax == "1.1.3"
        for cx in out_00:
            if cx.abstract_syntax[0] == "2":
                # role-negotiated contexts follow the parametrized outcome
                assert cx.as_scu == out[2]
                assert cx.as_scp == out[3]
            else:
                # default roles when no negotiation took place
                assert cx.as_scu is False
                assert cx.as_scp is True
        if out == CONTEXT_REJECTED:
            assert out_01[0].abstract_syntax == "2.1.1"
            assert out_01[1].abstract_syntax == "2.1.2"
            assert out_01[2].abstract_syntax == "2.1.2"
            assert out_01[3].abstract_syntax == "2.1.3"
            for cx in out_01:
                assert cx.as_scu is False
                assert cx.as_scp is False
        # rejected contexts never act in either role
        for cx in out_02:
            assert cx.as_scu is False
            assert cx.as_scp is False
        for cx in out_03:
            assert cx.as_scu is False
            assert cx.as_scp is False
        for cx in out_04:
            assert cx.as_scu is False
            assert cx.as_scp is False
class TestNegotiateAsRequestorWithRoleSelection:
"""Tests negotiate_as_requestor with role selection."""
@pytest.mark.parametrize("req, acc, out", REFERENCE_ROLES)
def test_scp_scu_role_negotiation(self, req, acc, out):
"""Test presentation service negotiation with role selection."""
rq = build_context("1.2.3.4")
rq.context_id = 1
rq.scu_role = req[0]
rq.scp_role = req[1]
ac = build_context("1.2.3.4")
ac.context_id = 1
ac.result = 0x0000
ac_roles = {"1.2.3.4": (acc[0], acc[1])}
result = negotiate_as_requestor([rq], [ac], ac_roles)
assert result[0].abstract_syntax == "1.2.3.4"
assert result[0].transfer_syntax[0] == "1.2.840.10008.1.2"
assert result[0].as_scu == out[0]
assert result[0].as_scp == out[1]
    def test_multiple_contexts_same_abstract(self):
        """Test that SCP/SCU role neg works with multiple contexts."""
        # two contexts proposing the same abstract syntax, both SCP-only
        rq_contexts = [build_context("1.2.3.4"), build_context("1.2.3.4")]
        for ii, context in enumerate(rq_contexts):
            context.context_id = ii * 2 + 1
            context.scu_role = False
            context.scp_role = True
        # a third context with no role selection
        rq_contexts.append(build_context("1.2.3.4.5"))
        rq_contexts[2].context_id = 5
        ac_roles = {}
        ac = build_context("1.2.3.4")
        ac.context_id = 1
        ac.result = 0x0000
        ac_roles["1.2.3.4"] = (False, True)
        # NOTE(review): "1.2.3.4.1" was never proposed by the requestor —
        # presumably included to check unknown role responses are ignored;
        # confirm against negotiate_as_requestor's contract.
        ac2 = build_context("1.2.3.4.1")
        ac2.context_id = 3
        ac2.result = 0x0000
        ac_roles["1.2.3.4.1"] = (False, True)
        ac3 = build_context("1.2.3.4.5")
        ac3.context_id = 5
        ac3.result = 0x0000
        result = negotiate_as_requestor(rq_contexts, [ac, ac2, ac3], ac_roles)
        assert len(result) == 3
        # the two role-negotiated contexts act as SCP only
        for context in result[:2]:
            assert context.abstract_syntax == "1.2.3.4"
            assert context.transfer_syntax[0] == "1.2.840.10008.1.2"
            assert context.as_scu == False
            assert context.as_scp == True
        # the non-negotiated context keeps the default requestor roles
        assert result[2].abstract_syntax == "1.2.3.4.5"
        assert result[2].transfer_syntax[0] == "1.2.840.10008.1.2"
        assert result[2].as_scu == True
        assert result[2].as_scp == False
    def test_functional(self):
        """Functional test of role negotiation.

        Runs a full round trip: requestor proposal -> acceptor negotiation
        -> requestor processing of the acceptor's response.
        """
        # Requestor
        context_a = build_context(CompositeInstanceRetrieveWithoutBulkDataGet)
        context_a.context_id = 1
        context_b = build_context(CTImageStorage)
        context_b.context_id = 3
        # propose SCP-only for storage (typical C-GET sub-operation setup)
        rq_roles = {CTImageStorage: (False, True)}
        rq_contexts = [context_a, context_b]
        # Acceptor
        context_a = build_context(CompositeInstanceRetrieveWithoutBulkDataGet)
        context_b = build_context(CTImageStorage)
        context_b.scu_role = False
        context_b.scp_role = True
        ac_contexts = [context_a, context_b]
        # Requestor -> Acceptor
        result, roles = negotiate_as_acceptor(rq_contexts, ac_contexts, rq_roles)
        # Acceptor -> Requestor
        ac_roles = {}
        for role in roles:
            ac_roles[role.sop_class_uid] = (role.scu_role, role.scp_role)
        rq_contexts[1].scu_role = False
        rq_contexts[1].scp_role = True
        result = negotiate_as_requestor(rq_contexts, result, ac_roles)
        # retrieve context: default requestor roles (SCU only)
        assert result[0].abstract_syntax == CompositeInstanceRetrieveWithoutBulkDataGet
        assert result[0].as_scu
        assert not result[0].as_scp
        # storage context: negotiated SCP-only roles
        assert result[1].abstract_syntax == CTImageStorage
        assert not result[1].as_scu
        assert result[1].as_scp
def test_acc_invalid_return_scp(self):
"""Test that the role negotiation is OK if given invalid SCP value."""
# Requestor
context_a = build_context(CTImageStorage)
context_a.context_id = 3
rq_roles = {CTImageStorage: (True, False)}
rq_contexts = [context_a]
# Acceptor
context_a = build_context(CompositeInstanceRetrieveWithoutBulkDataGet)
context_b = build_context(CTImageStorage)
context_b.scu_role = True
context_b.scp_role = True
ac_contexts = [context_a, context_b]
# Requestor -> Acceptor
result, roles = negotiate_as_acceptor(rq_contexts, ac_contexts, rq_roles)
# Force invalid SCP role response
roles[0].scp_role = True
# Acceptor -> Requestor
ac_roles = {}
for role in roles:
ac_roles[role.sop_class_uid] = (role.scu_role, role.scp_role)
rq_contexts[0].scu_role = True
rq_contexts[0].scp_role = False
result = negotiate_as_requestor(rq_contexts, result, ac_roles)
assert result[0].as_scu is True
assert result[0].as_scp is False
def test_acc_invalid_return_scu(self):
"""Test that the role negotiation is OK if given invalid SCU value."""
# Requestor
context_a = build_context(CTImageStorage)
context_a.context_id = 3
rq_roles = {CTImageStorage: (False, True)}
rq_contexts = [context_a]
# Acceptor
context_a = build_context(CompositeInstanceRetrieveWithoutBulkDataGet)
context_b = build_context(CTImageStorage)
context_b.scu_role = True
context_b.scp_role = True
ac_contexts = [context_a, context_b]
# Requestor -> Acceptor
result, roles = negotiate_as_acceptor(rq_contexts, ac_contexts, rq_roles)
# Force invalid SCU role response
roles[0].scu_role = True
# Acceptor -> Requestor
ac_roles = {}
for role in roles:
ac_roles[role.sop_class_uid] = (role.scu_role, role.scp_role)
rq_contexts[0].scu_role = False
rq_contexts[0].scp_role = True
result = negotiate_as_requestor(rq_contexts, result, ac_roles)
assert result[0].as_scu is False
assert result[0].as_scp is True
@pytest.mark.parametrize("req, acc, out", REFERENCE_ROLES)
def test_combination(self, req, acc, out):
"""Test that returned combinations work OK."""
## GENERATE ACCEPTOR RESPONSE
# No role selection
rq_contexts = []
ac_contexts = []
# 0x00 - accepted
rq_contexts.append(build_context("1.1.1"))
rq_contexts.append(build_context("1.1.2"))
rq_contexts.append(build_context("1.1.2"))
rq_contexts.append(build_context("1.1.3"))
ac_contexts.append(build_context("1.1.1"))
ac_contexts.append(build_context("1.1.2"))
ac_contexts.append(build_context("1.1.3"))
# 0x01 - user rejected - only achievable with role selection
# 0x02 - provider rejected - not achievable as acceptor
        # 0x03 - abstract syntax not supported
rq_contexts.append(build_context("1.4.1"))
rq_contexts.append(build_context("1.4.2"))
rq_contexts.append(build_context("1.4.2"))
rq_contexts.append(build_context("1.4.3"))
# 0x04 - transfer syntax not supported
rq_contexts.append(build_context("1.5.1", "1.2"))
rq_contexts.append(build_context("1.5.2", "1.2"))
rq_contexts.append(build_context("1.5.2", "1.2"))
rq_contexts.append(build_context("1.5.3", "1.2"))
ac_contexts.append(build_context("1.5.1"))
ac_contexts.append(build_context("1.5.2"))
ac_contexts.append(build_context("1.5.3"))
# Role selection
rq_roles = {}
# 0x00 - accepted and 0x01 - user rejected
for uid in ["2.1.1", "2.1.2", "2.1.2", "2.1.3"]:
rq_contexts.append(build_context(uid))
rq_roles[uid] = (req[0], req[1])
cx = build_context(uid)
cx.scu_role = acc[0]
cx.scp_role = acc[1]
ac_contexts.append(cx)
# 0x03 - abstract syntax not supported
for uid in ["2.4.1", "2.4.2", "2.4.2", "2.4.3"]:
rq_contexts.append(build_context(uid))
rq_roles[uid] = (req[0], req[1])
# 0x04 - transfer syntax not supported
for uid in ["2.5.1", "2.5.2", "2.5.2", "2.5.3"]:
rq_contexts.append(build_context(uid, "1.2"))
rq_roles[uid] = (req[0], req[1])
cx = build_context(uid)
cx.scu_role = acc[0]
cx.scp_role = acc[1]
ac_contexts.append(cx)
for ii, cx in enumerate(rq_contexts):
cx.context_id = (ii + 1) * 2 - 1
results, roles = negotiate_as_acceptor(rq_contexts, ac_contexts, rq_roles)
## TEST REQUESTOR NEGOTIATION
for cx in rq_contexts:
if "2" == cx.abstract_syntax[0]:
cx.scu_role = req[0]
cx.scp_role = req[1]
roles | |
<filename>mpcpy/units.py<gh_stars>10-100
# -*- coding: utf-8 -*-
"""
``units`` classes manage the conversion of units for MPCPy variables. See
documentation on ``variables`` for more information.
"""
from abc import ABCMeta, abstractmethod
import numpy as np
#%% Display unit abstract interface
class _DisplayUnit(object):
__metaclass__ = ABCMeta;
@abstractmethod
def _define_quantity(self):
pass;
@abstractmethod
def _define_display_unit(self):
pass;
@abstractmethod
def _convert_to_base(self):
pass;
@abstractmethod
def _convert_from_base(self):
pass;
def __init__(self, variable):
self._define_quantity(variable);
self._define_display_unit();
#%% Display unit quantity implementation
# Quantity classes: each subclass fixes the quantity name and the base unit
# in which values of that quantity are stored internally.
class _Boolean(_DisplayUnit):
    """Boolean quantity; base unit: boolean_integer."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'Boolean'
        variable.base_unit = boolean_integer
class _Temperature(_DisplayUnit):
    """Temperature quantity; base unit: Kelvin."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'Temperature'
        variable.base_unit = K
class _Power(_DisplayUnit):
    """Power quantity; base unit: Watt."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'Power'
        variable.base_unit = W
class _Energy(_DisplayUnit):
    """Energy quantity; base unit: Joule."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'Energy'
        variable.base_unit = J
class _PowerFlux(_DisplayUnit):
    """Power flux quantity; base unit: W/m2."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'PowerFlux'
        variable.base_unit = W_m2
class _EnergyIntensity(_DisplayUnit):
    """Energy intensity quantity; base unit: J/m2."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'EnergyIntensity'
        variable.base_unit = J_m2
class _Pressure(_DisplayUnit):
    """Pressure quantity; base unit: Pascal."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'Pressure'
        variable.base_unit = Pa
class _DimensionlessRatio(_DisplayUnit):
    """Dimensionless ratio quantity; base unit: unit1."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'DimensionlessRatio'
        variable.base_unit = unit1
class _Angle(_DisplayUnit):
    """Angle quantity; base unit: radian."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'Angle'
        variable.base_unit = rad
class _Time(_DisplayUnit):
    """Time quantity; base unit: second."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'Time'
        variable.base_unit = s
class _Mass(_DisplayUnit):
    """Mass quantity; base unit: kilogram."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'Mass'
        variable.base_unit = kg
class _Length(_DisplayUnit):
    """Length quantity; base unit: meter."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'Length'
        variable.base_unit = m
class _Area(_DisplayUnit):
    """Area quantity; base unit: m2."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'Area'
        variable.base_unit = m2
class _Volume(_DisplayUnit):
    """Volume quantity; base unit: m3."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'Volume'
        variable.base_unit = m3
class _MassFlow(_DisplayUnit):
    """Mass flow quantity; base unit: kg/s."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'MassFlow'
        variable.base_unit = kg_s
class _VolumetricFlow(_DisplayUnit):
    """Volumetric flow quantity; base unit: m3/s."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'VolumetricFlow'
        variable.base_unit = m3_s
class _Velocity(_DisplayUnit):
    """Velocity quantity; base unit: m/s."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'Velocity'
        variable.base_unit = m_s
class _Illuminance(_DisplayUnit):
    """Illuminance quantity; base unit: lux."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'Illuminance'
        variable.base_unit = lx
class _Luminance(_DisplayUnit):
    """Luminance quantity; base unit: cd/m2."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'Luminance'
        variable.base_unit = cd_m2
class _EnergyPrice(_DisplayUnit):
    """Energy price quantity; base unit: dollars per Joule."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'EnergyPrice'
        variable.base_unit = dol_J
class _PowerPrice(_DisplayUnit):
    """Power price quantity; base unit: dollars per Watt."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'PowerPrice'
        variable.base_unit = dol_W
class _SpecificHeatCapacity(_DisplayUnit):
    """Specific heat capacity quantity; base unit: J/(kg.K)."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'SpecificHeatCapacity'
        variable.base_unit = J_kgK
class _HeatCapacity(_DisplayUnit):
    """Heat capacity quantity; base unit: J/K."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'HeatCapacity'
        variable.base_unit = J_K
class _HeatCapacityCoefficient(_DisplayUnit):
    """Heat capacity coefficient quantity; base unit: J/(m2.K)."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'HeatCapacityCoefficient'
        variable.base_unit = J_m2K
class _HeatResistance(_DisplayUnit):
    """Heat resistance quantity; base unit: K/W."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'HeatResistance'
        variable.base_unit = K_W
class _HeatResistanceCoefficient(_DisplayUnit):
    """Heat resistance coefficient quantity; base unit: (m2.K)/W."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'HeatResistanceCoefficient'
        variable.base_unit = m2K_W
class _HeatTransferCoefficient(_DisplayUnit):
    """Heat transfer coefficient quantity; base unit: W/(m2.K)."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'HeatTransferCoefficient'
        variable.base_unit = W_m2K
class _Density(_DisplayUnit):
    """Density quantity; base unit: kg/m3."""
    def _define_quantity(self, variable):
        variable.quantity_name = 'Density'
        variable.base_unit = kg_m3
#%% Boolean display unit implementation
class boolean_integer(_Boolean):
    """Boolean stored and displayed as an integer (0/1); the base unit."""
    def _define_display_unit(self):
        self.name = 'boolean_integer'
    def _convert_to_base(self, display_data):
        # Coerce to int so the base representation is always 0/1.
        return int(display_data)
    def _convert_from_base(self, base_data):
        # Base data is already an integer; display it unchanged.
        return base_data
class boolean(_Boolean):
    """Boolean displayed as True/False."""
    def _define_display_unit(self):
        self.name = 'boolean'
    def _convert_to_base(self, display_data):
        return int(display_data)
    def _convert_from_base(self, base_data):
        return bool(base_data)
#%% Temperature display unit implementation
class K(_Temperature):
    """Kelvin -- the base temperature unit (identity conversions)."""
    def _define_display_unit(self):
        self.name = 'K'
    def _convert_to_base(self, display_data):
        return display_data
    def _convert_from_base(self, base_data):
        return base_data
class degC(_Temperature):
    """Degrees Celsius."""
    def _define_display_unit(self):
        self.name = 'degC'
    def _convert_to_base(self, display_data):
        return display_data + 273.15
    def _convert_from_base(self, base_data):
        return base_data - 273.15
class degF(_Temperature):
    """Degrees Fahrenheit."""
    def _define_display_unit(self):
        self.name = 'degF'
    def _convert_to_base(self, display_data):
        return (display_data-32)*5/9 + 273.15
    def _convert_from_base(self, base_data):
        return (base_data-273.15)*9/5 + 32
class degR(_Temperature):
    """Degrees Rankine (converted via Fahrenheit)."""
    def _define_display_unit(self):
        self.name = 'degR'
    def _convert_to_base(self, display_data):
        # R -> F -> K; algebraically equivalent to K = R * 5/9.
        return ((display_data - 459.67)-32)*5/9 + 273.15
    def _convert_from_base(self, base_data):
        return (base_data-273.15)*9/5 + 32 + 459.67
#%% Power display unit implementation
class W(_Power):
    """Watts -- the base power unit (identity conversions)."""
    def _define_display_unit(self):
        self.name = 'W'
    def _convert_to_base(self, display_data):
        return display_data
    def _convert_from_base(self, base_data):
        return base_data
class kW(_Power):
    """Kilowatts."""
    def _define_display_unit(self):
        self.name = 'kW'
    def _convert_to_base(self, display_data):
        return display_data*1e3
    def _convert_from_base(self, base_data):
        return base_data/1e3
class MW(_Power):
    """Megawatts."""
    def _define_display_unit(self):
        self.name = 'MW'
    def _convert_to_base(self, display_data):
        return display_data*1e6
    def _convert_from_base(self, base_data):
        return base_data/1e6
class Btuh(_Power):
    """BTU per hour (1 Btu/h = 0.29307107 W)."""
    def _define_display_unit(self):
        self.name = 'Btuh'
    def _convert_to_base(self, display_data):
        return display_data*0.29307107
    def _convert_from_base(self, base_data):
        return base_data/0.29307107
class kBtuh(_Power):
    """Thousands of BTU per hour."""
    def _define_display_unit(self):
        self.name = 'kBtuh'
    def _convert_to_base(self, display_data):
        return (display_data*1e3)*0.29307107
    def _convert_from_base(self, base_data):
        return base_data/0.29307107/1e3
class hp(_Power):
    """Horsepower (1 hp = 745.699872 W)."""
    def _define_display_unit(self):
        self.name = 'hp'
    def _convert_to_base(self, display_data):
        return display_data*745.699872
    def _convert_from_base(self, base_data):
        return base_data/745.699872
#%% Energy display unit implementation
class J(_Energy):
    """Joules -- the base energy unit (identity conversions)."""
    def _define_display_unit(self):
        self.name = 'J'
    def _convert_to_base(self, display_data):
        return display_data
    def _convert_from_base(self, base_data):
        return base_data
class kJ(_Energy):
    """Kilojoules."""
    def _define_display_unit(self):
        self.name = 'kJ'
    def _convert_to_base(self, display_data):
        return display_data*1e3
    def _convert_from_base(self, base_data):
        return base_data/1e3
class MJ(_Energy):
    """Megajoules."""
    def _define_display_unit(self):
        self.name = 'MJ'
    def _convert_to_base(self, display_data):
        return display_data*1e6
    def _convert_from_base(self, base_data):
        return base_data/1e6
class Btu(_Energy):
    """British thermal units (1 Btu = 1055.05585 J)."""
    def _define_display_unit(self):
        self.name = 'Btu'
    def _convert_to_base(self, display_data):
        return display_data*1055.05585
    def _convert_from_base(self, base_data):
        return base_data/1055.05585
class kBtu(_Energy):
    """Thousands of British thermal units."""
    def _define_display_unit(self):
        self.name = 'kBtu'
    def _convert_to_base(self, display_data):
        return (display_data*1e3)*1055.05585
    def _convert_from_base(self, base_data):
        return base_data/1055.05585/1e3
class Wh(_Energy):
    """Watt-hours (1 Wh = 3600 J)."""
    def _define_display_unit(self):
        self.name = 'Wh'
    def _convert_to_base(self, display_data):
        return display_data*3600
    def _convert_from_base(self, base_data):
        return base_data/3600
class kWh(_Energy):
    """Kilowatt-hours."""
    def _define_display_unit(self):
        self.name = 'kWh'
    def _convert_to_base(self, display_data):
        return display_data*1e3*3600
    def _convert_from_base(self, base_data):
        return base_data/3600/1e3
class MWh(_Energy):
    """Megawatt-hours."""
    def _define_display_unit(self):
        self.name = 'MWh'
    def _convert_to_base(self, display_data):
        return display_data*1e6*3600
    def _convert_from_base(self, base_data):
        return base_data/3600/1e6
#%% Power Flux display unit implementation
class W_m2(_PowerFlux):
    """Watts per square meter -- the base power-flux unit (identity)."""
    def _define_display_unit(self):
        self.name = 'W/m2'
    def _convert_to_base(self, display_data):
        return display_data
    def _convert_from_base(self, base_data):
        return base_data
class kW_m2(_PowerFlux):
    """Kilowatts per square meter."""
    def _define_display_unit(self):
        self.name = 'kW/m2'
    def _convert_to_base(self, display_data):
        return display_data*1e3
    def _convert_from_base(self, base_data):
        return base_data/1e3
class W_sf(_PowerFlux):
    """Watts per square foot (1 m2 = 10.7639 sf)."""
    def _define_display_unit(self):
        self.name = 'W/sf'
    def _convert_to_base(self, display_data):
        return display_data*10.7639
    def _convert_from_base(self, base_data):
        return base_data/10.7639
class kW_sf(_PowerFlux):
    """Kilowatts per square foot."""
    def _define_display_unit(self):
        self.name = 'kW/sf'
    def _convert_to_base(self, display_data):
        return display_data*1e3*10.7639
    def _convert_from_base(self, base_data):
        return base_data/10.7639/1e3
class Btuh_sf(_PowerFlux):
    """BTU per hour per square foot (1 Btu/h/sf = 3.154594 W/m2)."""
    def _define_display_unit(self):
        self.name = 'Btuh/sf'
    def _convert_to_base(self, display_data):
        return display_data*3.154594
    def _convert_from_base(self, base_data):
        return base_data/3.154594
class kBtuh_sf(_PowerFlux):
    """Thousands of BTU per hour per square foot."""
    def _define_display_unit(self):
        self.name = 'kBtuh/sf'
    def _convert_to_base(self, display_data):
        return display_data*1e3*3.154594
    def _convert_from_base(self, base_data):
        return base_data/3.154594/1e3
#%% Energy Intensity display unit implementation
class J_m2(_EnergyIntensity):
    """Joules per square meter -- the base energy-intensity unit (identity)."""
    def _define_display_unit(self):
        self.name = 'J/m2'
    def _convert_to_base(self, display_data):
        return display_data
    def _convert_from_base(self, base_data):
        return base_data
class Wh_m2(_EnergyIntensity):
    """Watt-hours per square meter."""
    def _define_display_unit(self):
        self.name = 'Wh/m2'
    def _convert_to_base(self, display_data):
        return display_data*3600
    def _convert_from_base(self, base_data):
        return base_data/3600
class kWh_m2(_EnergyIntensity):
    """Kilowatt-hours per square meter."""
    def _define_display_unit(self):
        self.name = 'kWh/m2'
    def _convert_to_base(self, display_data):
        return display_data*1e3*3600
    def _convert_from_base(self, base_data):
        return base_data/3600/1e3
class Wh_sf(_EnergyIntensity):
    """Watt-hours per square foot."""
    def _define_display_unit(self):
        self.name = 'Wh/sf'
    def _convert_to_base(self, display_data):
        return display_data*3600*10.7639
    def _convert_from_base(self, base_data):
        return base_data/3600/10.7639
class kWh_sf(_EnergyIntensity):
    """Kilowatt-hours per square foot."""
    def _define_display_unit(self):
        self.name = 'kWh/sf'
    def _convert_to_base(self, display_data):
        return display_data*1e3*3600*10.7639
    def _convert_from_base(self, base_data):
        return base_data/3600/10.7639/1e3
class Btu_sf(_EnergyIntensity):
    """British thermal units per square foot."""
    def _define_display_unit(self):
        self.name = 'Btu/sf'
    def _convert_to_base(self, display_data):
        return display_data*1055.05585*10.7639
    def _convert_from_base(self, base_data):
        return base_data/1055.05585/10.7639
class kBtu_sf(_EnergyIntensity):
    """Thousands of British thermal units per square foot."""
    def _define_display_unit(self):
        self.name = 'kBtu/sf'
    def _convert_to_base(self, display_data):
        return display_data*1e3*1055.05585*10.7639
    def _convert_from_base(self, base_data):
        return base_data/1055.05585/10.7639/1e3
#%% Pressure display unit implementation
class Pa(_Pressure):
    """Pascals -- the base pressure unit (identity conversions)."""
    def _define_display_unit(self):
        self.name = 'Pa'
    def _convert_to_base(self, display_data):
        return display_data
    def _convert_from_base(self, base_data):
        return base_data
class kPa(_Pressure):
    """Kilopascals."""
    def _define_display_unit(self):
        self.name = 'kPa'
    def _convert_to_base(self, display_data):
        return display_data*1e3
    def _convert_from_base(self, base_data):
        return base_data/1e3
class MPa(_Pressure):
    """Megapascals."""
    def _define_display_unit(self):
        self.name = 'MPa'
    def _convert_to_base(self, display_data):
        return display_data*1e6
    def _convert_from_base(self, base_data):
        return base_data/1e6
class bar(_Pressure):
    """Bar (1 bar = 1e5 Pa)."""
    def _define_display_unit(self):
        self.name = 'bar'
    def _convert_to_base(self, display_data):
        return display_data*1e5
    def _convert_from_base(self, base_data):
        return base_data/1e5
class inwg(_Pressure):
def _define_display_unit(self):
self.name = 'inwg';
| |
<gh_stars>0
# This is the version for EpiAlignment Web Service
import argparse
import sys
import copy
import math
from math import log, exp
from time import time
from multiprocessing import *
from functools import partial
class HomoRegion:
    '''
    Container for one pair of homologous regions.

    S1/S2 hold the two sequences (lists of (base, epi-states) tuples) and
    S1_path/S2_path the reconstructed alignment paths; the remaining
    attributes carry the region name, lengths, hit locations, the start
    point and the score/probability vector filled in during alignment.
    '''
    def __init__(self):
        self.name = ""
        self.S1 = []
        self.S2 = []
        self.S1_path = ""
        self.S2_path = ""
        self.L = 0
        self.averagedL = 0
        self.loc1 = -1
        self.loc2 = -1
        self.start_point = 0
        self.prob = []
def ParseArg():
    """Parse command-line arguments.

    Returns an ``argparse.Namespace`` with the parsed options. When the
    program is invoked with no arguments, the usage message is printed to
    stderr and the process exits with status 0.
    """
    p = argparse.ArgumentParser(description="EpiAlignment. A semi-global alignment algorithm for chromosomal similarity search.")
    p.add_argument("Input", type=str, help="Input file name.")
    p.add_argument(
        "-e",
        "--equil_file",
        type=str,
        help="The parameter file containing intial guesses for s, mu, k, equilibrium probabilities and weights.")
    p.add_argument("-p", "--process_num", type=int, default=1, help="Number of processes to be used. Default: 1.")
    p.add_argument(
        "-o",
        "--output",
        type=str,
        help="Output file name. This file contains region name, alignment scores and target position for each region pair.")
    p.add_argument(
        "-O",
        "--out_allvec",
        type=str,
        help="Output file name. This file contains the last rows of the alignment matrices (alignment scores across the search regions). Only available when --all_prob is specified.")
    p.add_argument(
        "-r",
        "--align_path",
        type=str,
        help="Alignment path file name. The alignment path will be output if specified. WARNING: The reconstruction of alignment paths has excessive memory demand. Use only when input sequence number is small. ")
    if len(sys.argv) == 1:
        # Print the usage to stderr and quit. The previous Python-2-only
        # statement `print >>sys.stderr, p.print_help()` actually printed the
        # help to stdout and then wrote "None" (print_help's return value)
        # to stderr.
        p.print_help(sys.stderr)
        sys.exit(0)
    return p.parse_args()
def ReadInput(fin_name):
    '''
    Read the input file.

    fin_name: input file name (the positional argument ``Input``).
    return: (Slist, s1_maxlen, s2_maxlen, s1_avelen, s2_avelen) where Slist
    is a list of HomoRegion objects and the other values are the maximum and
    average lengths of the first (S1) and second (S2) sequences.

    NOTE(review): records appear to alternate "@header / sequence lines /
    + / epi-state lines"; each HomoRegion's name is taken from the "@"
    header that separates its S1 block from its S2 block, while the very
    first "@" header's name is discarded -- confirm against the input
    format specification.
    '''
    Slist = []
    s1_count = 0    # number of S1 blocks started so far
    s2_count = 0    # number of S2 blocks completed so far
    flag = 0        # 1 -> reading sequence characters, 0 -> reading epi-state lines
    with open(fin_name, "r") as fin:
        S = []
        s1_maxlen = 0
        s2_maxlen = 0
        s1_avelen = 0
        s2_avelen = 0
        line = fin.readline().strip()
        if "@" not in line:
            raise Exception(301, "The first line of the input file does not start with @.")
        flag = 1
        while True:
            line = fin.readline().strip()
            # End of file (readline returns ""): close the pending region.
            if len(line) == 0:
                if s1_count > s2_count:
                    Sobj.S2 = S
                    Slist.append(Sobj)
                    s2_count += 1
                    if len(Sobj.S1) > s1_maxlen:
                        s1_maxlen = len(Sobj.S1)
                    if len(Sobj.S2) > s2_maxlen:
                        s2_maxlen = len(Sobj.S2)
                    s1_avelen += len(Sobj.S1)
                    s2_avelen += len(Sobj.S2)
                else:
                    raise Exception(302, "The number of sequences are different!")
                break
            # "+" switches from sequence lines to epi-state lines.
            if line == "+":
                i = 0
                flag = 0
                continue
            # A header line either closes the pending S2 block or opens a
            # new region and assigns the accumulated data as its S1.
            if "@" in line:
                if s1_count > s2_count:
                    Sobj.S2 = S
                    Slist.append(Sobj)
                    s2_count += 1
                    if len(Sobj.S1) > s1_maxlen:
                        s1_maxlen = len(Sobj.S1)
                    if len(Sobj.S2) > s2_maxlen:
                        s2_maxlen = len(Sobj.S2)
                    s1_avelen += len(Sobj.S1)
                    s2_avelen += len(Sobj.S2)
                else:
                    Sobj = HomoRegion()
                    Sobj.name = line[1:]
                    Sobj.S1 = S
                    s1_count += 1
                    flag = 1
                S = []
                continue
            if flag == 1:
                # Sequence line: one (base, epi-states) tuple per character.
                line = line.upper()
                S += [(x, "") for x in line]
            else:
                # Epi-state line: append one state character to each tuple,
                # starting at offset i (tracks position across epi lines).
                S = S[0:i] + [(a[0], a[1] + b) for a, b in zip(S[i:(i + len(line))], line)] + S[(i + len(line)):]
                i += len(line)
    # NOTE(review): "/" is integer division under Python 2 (this file uses
    # Python-2 syntax elsewhere), so the averages are truncated -- confirm.
    return Slist, s1_maxlen, s2_maxlen, s1_avelen / len(Slist), s2_avelen / len(Slist)
def ReadParameters(f_name):
    '''
    Read parameters. Build the dictionaries of equilibrium probabilities on
    the linear and log scales.

    f_name: parameter file name, which is specified by --equil_file.
    Sample return:
    parameter vector x: [0.1, 0.01, 0.1]
    weights: [1.0, 0.0]
    equil_dic: {'A': 0.25, 1: [0.9, 0.1], 'C': 0.25, 'T': 0.25, 'G': 0.25}
    log_equil_dict: {'A': -1.386, 1: [-0.105, -2.303], 'C': -1.386, 'T': -1.386, 'G': -1.386}
    '''
    x = []
    weights = []
    equil_dict = {}
    log_equil_dict = {}
    n_epi = 0
    with open(f_name, "r") as fin:
        # The first two lines are always single scalar parameters.
        x.append(float(fin.readline().strip()))
        x.append(float(fin.readline().strip()))
        for raw in fin:
            fields = raw.strip().split("\t")
            if len(fields) <= 1:
                # Single-column line: another scalar parameter.
                x.append(float(fields[0]))
                continue
            if n_epi == 0:
                # First multi-column line: base equilibrium probabilities.
                for entry in fields:
                    equil_dict[entry.split(":")[0]] = float(entry.split(":")[1])
                    log_equil_dict[entry.split(":")[0]] = log(float(entry.split(":")[1]))
                n_epi += 1
            elif ":" in fields[0]:
                # "state:prob" pairs for the next epigenetic mark.
                equil_dict[n_epi] = [0.0, 0.0]
                log_equil_dict[n_epi] = [0.0, 0.0]
                for entry in fields:
                    equil_dict[n_epi][int(entry.split(":")[0])] = float(entry.split(":")[1])
                    log_equil_dict[n_epi][int(entry.split(":")[0])] = log(float(entry.split(":")[1]))
                n_epi += 1
            else:
                # A multi-column line without ":" carries the weights vector.
                weights = [float(w) for w in fields]
    return x, weights, equil_dict, log_equil_dict
# def Log_sum(lnA, lnB, lnC, lnD):
# '''
# tmp=[logA, logB, logC, logD]
# return log(A+B+C-D)
# '''
# tm = max(lnA, lnB, lnC, lnD)
# sm = exp(lnA - tm) + exp(lnB - tm) + exp(lnC - tm) - exp(lnD - tm)
# try:
# return tm + log(sm)
# except:
# return float("-Inf")
# def Log_sum3(lnA, lnB, lnC):
# '''
# only for path_len_mat
# tmp=[logA, logB, logC]
# return log(A+B+C)
# '''
# tm = max(lnA, lnB, lnC)
# sm = exp(lnA - tm) + exp(lnB - tm) + exp(lnC - tm)
# try:
# return tm + log(sm)
# except:
#         return float("-Inf")  # FIX: this line was left un-commented inside the disabled Log_sum3 block and raised an IndentationError at import time
# def Path_matrix(mlen1, mlen2, mu, lamb, beta, link_p, log_link_p):
# '''
# Pre-compute the path matrix for normalization.
# mlen1, mlen2: maximal lengths of ancestral and descendent regions.
# mu, lamb, beta, link_p: parameters
# return: a mlen1 * mlen2 matrix.
# '''
# if mlen1 > mlen2:
# tmp = mlen2
# mlen2 = mlen1
# mlen1 = tmp
# if mlen1 > mlen2:
# mlen2 = mlen1
# else:
# mlen1 = mlen2
# path_mat3 = [[Na] * (mlen2 + 1) for i in xrange(mlen1 + 1)]
# path_mat2 = [[Na] * (mlen2 + 1) for i in xrange(mlen1 + 1)]
# link_p12 = max(link_p[1], link_p[2])
# log_link_p12 = log(link_p12)
# log_lamb_mu = log(lamb / mu)
# lamb_beta = lamb * beta
# log_lamb_beta = log(lamb_beta)
# path_mat3[0][0] = log_link_p[3] + log(Gamma(0,lamb,mu))
# path_mat3[1][0] = log_link_p[3] + log_link_p[0] + log(Gamma(0,lamb,mu)) + log_lamb_mu
# path_mat3[0][1] = log_link_p[3] + log_lamb_beta + log(Gamma(0,lamb,mu))
# path_mat2[0][1] = log_link_p[3] + log_lamb_beta + log(Gamma(0,lamb,mu))
# # First row
# for i in xrange(2, mlen2 + 1):
# path_mat3[0][i] = path_mat3[0][i-1] + log_lamb_beta
# path_mat2[0][i] = path_mat2[0][i-1] + log_lamb_beta
# # First column
# for i in xrange(2, mlen1 + 1):
# path_mat3[i][0] = path_mat3[i-1][0] + log_link_p[0] + log_lamb_mu
# for i in xrange(1, (mlen1 + 1)):
# for j in xrange(1, (mlen2 + 1)):
# ent0 = log_lamb_mu + log_link_p[0] + path_mat3[i - 1][j]
# ent1 = log_link_p12 + log_lamb_mu + path_mat3[i - 1][j - 1]
# ent2 = log_lamb_beta + path_mat2[i][j - 1]
# max_v3, max_v2, tup_ind1, tup_ind2 = Maximum(ent0, ent1, ent2, j)
# #print "max"
# #print max_v3, max_v2
# #print path_mat3
# #print path_mat2
# #print "\n"
# path_mat3[i][j] = max_v3
# path_mat2[i][j] = max_v2
# # for line in path_mat3:
# # print " ".join([str(f) for f in line])
# # print "\n"
# return path_mat3
# def Mod_equilibrium(e_dict, log_e_dict, weights):
# '''
# Scale the equilibrium probabilities for DNA bases by sequence weight.
# e_dict, log_e_dict: dictionaries of equilibrium probabilities on the linear and log scales. equil_dict and log_equil_dict returned by ReadParameters.
# weights: the weights vector returned by ReadParameters.
# return: None. The function will modify the dictionaries directly.
# '''
# for b in "A", "C", "G", "T":
# e_dict[b] = e_dict[b]**weights[0]
# log_e_dict[b] = log_e_dict[b] * weights[0]
def Epi_equilibrium(n_epi, equil_dict, log_equil_dict, weights):
'''
Products of the equilibrium probabilities of epigenetic marks.
The function will iterate all possible combinations of '1's and '0's (k) and compute the equilibrium probability of observing the combination.
For example, if there are two epi-marks, all possible k's will be '00', '01', '10', '11'.
The equilibrium probabilities are scaled by epi-weights.
n_epi: number of epigenomic marks.
weights: the weights vector.
return: two dictionaries, in which the keys are combination of '1's and '0's and values are equilibrium probabilities.
'''
S_epi = {}
log_S_epi = {}
for i in xrange(pow(2, n_epi)):
k = bin(i)[2:].zfill(n_epi)
v = 1.0
lv = 0.0
for j in xrange(0, n_epi):
v = v * (equil_dict[j + 1][int(k[j])]) ** weights[j + 1]
lv += weights[j + 1] * log_equil_dict[j + 1][int(k[j])]
S_epi[k] = v
log_S_epi[k] = lv
return S_epi, log_S_epi
def Equilibrium_matrix(log_equil_dict, log_S_epi, weights):
    '''
    Products of the equilibrium probabilities of bases and epigenetic marks,
    on the log scale. Base equilibrium probabilities are scaled by the
    sequence weight (weights[0]).
    return: a dictionary. Keys: base -> epi-state. Values: scaled
    log-equilibrium probabilities.
    '''
    seq_weight = weights[0]
    log_equil_mat = {}
    for base in ("A", "C", "G", "T"):
        row = {}
        for epi_state in log_S_epi:
            row[epi_state] = log_equil_dict[base] * seq_weight + log_S_epi[epi_state]
        log_equil_mat[base] = row
    return log_equil_mat
def Link_prob(prime, n, b, lam, mu):
"""
Compute p', p'', p''' defined in the TKF DNA evolutionary model.
prime: the prime of p (0, 1 or 2)
n: the subscript of p.
b, lam, mu: the value of beta, lambda and mu. See the function Manhattan for the definition of beta.
return: p'_0, p_1, p'_1 or p''_1
"""
| |
sheet.write(i,j, row.id)
j+=1
if 'name' in options:
sheet.write(i,j, row.mol.name)
j+=1
if 'names' in options:
names = u''
for n in row.mol.names:
names += n.name + u', '
sheet.write(i,j, names)
j+=1
if 'image' in options:
file_in = './molgears/public/img/%s.png' % row.gid
img = Image.open(file_in)
file_out = './molgears/public/img/bitmap/thumb%s.bmp' %row.gid
img.thumbnail(size, Image.ANTIALIAS)
img.save(file_out)
sheet.insert_bitmap(file_out , i,j, 5, 5)
j+=1
if 'smiles' in options:
sheet.write(i,j, str(row.mol.structure))
j+=1
if 'inchi' in options:
sheet.write(i,j, str(row.mol.inchi))
j+=1
if 'num_atoms' in options:
sheet.write(i,j,str(row.mol.num_hvy_atoms)+'/'+str(row.mol.num_atoms))
j+=1
if 'mw' in options:
sheet.write(i,j, str(row.mol.mw))
j+=1
if 'logp' in options:
sheet.write(i,j, str(row.mol.logp))
j+=1
if 'hba' in options:
sheet.write(i,j, str(row.mol.hba))
j+=1
if 'hbd' in options:
sheet.write(i,j, str(row.mol.hbd))
j+=1
if 'tpsa' in options:
sheet.write(i,j, str(row.mol.tpsa))
j+=1
if 'create_date' in options:
sheet.write(i,j, str(row.create_date))
j+=1
if 'owner' in options:
sheet.write(i,j, row.owner)
j+=1
if 'principal' in options:
sheet.write(i,j, row.principal)
j+=1
if 'priority' in options:
sheet.write(i,j, row.priority)
j+=1
if 'status' in options:
sheet.write(i,j, row.status.name)
j+=1
if 'tags' in options:
tagsy=u''
for tag in row.mol.tags:
tagsy += tag.name + u', '
sheet.write(i,j,tagsy)
j+=1
if 'notes' in options:
sheet.write(i,j, row.notes)
j+=1
i += 1
wbk.save(filepath)
import paste.fileapp
f = paste.fileapp.FileApp(filepath)
from tg import use_wsgi_app
return use_wsgi_app(f)
elif kw['file_type'] == 'csv' or 'txt':
filename = userid + '_selected.' + kw['file_type']
filepath = os.path.join('./molgears/files/download/', filename)
from molgears.widgets.unicodeCSV import UnicodeWriter
import csv
if kw['file_type'] == u'csv':
delimiter = ';'
else:
delimiter = ' '
with open(filepath, 'wb') as csvfile:
spamwriter = UnicodeWriter(csvfile, delimiter=delimiter,
quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row in pcompounds:
line =[]
if 'smiles' in options:
line.append(str(row.mol.structure))
if 'name' in options:
line.append(row.mol.name)
if 'nr' in options:
line.append(unicode(pcompounds.index(row)+1))
if 'gid' in options:
line.append(unicode(row.gid))
if 'id' in options:
line.append(unicode(row.id))
if 'names' in options:
names = u''
for n in row.mol.names:
names += n.name + u', '
line.append(names)
if 'inchi' in options:
line.append(row.mol.inchi)
if 'num_atoms' in options:
line.append(unicode(row.mol.num_hvy_atoms)+'/'+unicode(row.mol.num_atoms))
if 'mw' in options:
line.append(unicode(row.mol.mw))
if 'logp' in options:
line.append(unicode(row.mol.logp))
if 'hba' in options:
line.append(unicode(row.mol.hba))
if 'hbd' in options:
line.append(unicode(row.mol.hbd))
if 'tpsa' in options:
line.append(unicode(row.mol.tpsa))
if 'create_date' in options:
line.append(unicode(row.create_date))
if 'owner' in options:
line.append(row.owner)
if 'principal' in options:
line.append(row.principal)
if 'priority' in options:
line.append(unicode(row.priority))
if 'status' in options:
line.append(row.status.name)
if 'tags' in options:
tagsy= ''
for tag in row.mol.tags:
tagsy += tag.name + ', '
line.append(tagsy)
if 'notes' in options:
line.append(row.notes)
spamwriter.writerow(line)
import paste.fileapp
f = paste.fileapp.FileApp(filepath)
from tg import use_wsgi_app
return use_wsgi_app(f)
if selection and not search_clicked:
argv =''
for arg in selection:
argv += '/' + arg
if kw['akcja'] == u'edit':
if len(selection) == 1:
redirect('/%s/select/edit%s' % (pname, argv))
else:
redirect('/%s/select/multiedit/index%s' % (pname, argv))
elif kw['akcja'] == u'accept':
if len(selection) == 1:
redirect('/%s/select/accept%s' % (pname, argv))
else:
redirect('/%s/select/multiaccept/index%s' % (pname, argv))
elif kw['akcja'] == u'delete':
if len(selection) == 1:
redirect('/%s/select/post_delete%s' % (pname, argv))
else:
redirect('/%s/select/multidelete/index%s' % (pname, argv))
else:
redirect('/%s/select/%s%s' % (pname, kw['akcja'], argv))
currentPage = paginate.Page(pcompound, page, url=page_url, items_per_page=items)
return dict(currentPage=currentPage, tmpl=tmpl, page='select', pname=pname, similarity=similarity, alltags=alltags, allstatus=allstatus, ulists=ulists, ulist=ulist)
    @expose('molgears.templates.users.select.edit')
    def edit(self, id):
        """Render the edit form for a single selected compound.

        id: PCompound primary key from the URL (string; converted to int).
        Redirects back to the referring page when the compound is not
        visible in the current project.
        """
        # Project name is the first URL path segment: /<project>/select/edit/<id>
        pname = request.environ['PATH_INFO'].split('/')[1]
        pid = int(id)
        # Restrict the lookup to compounds belonging to the current project.
        pcompound = DBSession.query( PCompound ).filter_by(id=pid).join(PCompound.mol).filter(Compound.project.any(Projects.name==pname)).first()
        # NOTE(review): group id 3 appears to be a hard-coded group whose
        # users populate the assignee ("principal") choices -- confirm.
        principals = DBSession.query (Group).get(3)
        if not pcompound:
            flash(l_(u'Permission denied'), 'warning')
            redirect(request.headers['Referer'])
        alltags =[tag for tag in DBSession.query(Tags).order_by('name').all() ]
        try:
            # mol.tags is normally iterable; fall back to wrapping a single value.
            tags = [tag for tag in pcompound.mol.tags]
        except Exception:
            tags = [pcompound.mol.tags]
            pass
        # Remember where to return to after the form is submitted.
        come_from = request.headers['Referer']
        return dict(pcompound=pcompound, alltags=alltags, tags=tags, come_from=come_from, page='select', pname=pname, users=principals.users)
@expose()
def put(self, *args, **kw):
    """Apply an edit to a request compound: principal, notes, tags, priority.

    ``args[0]`` is the PCompound id; ``kw`` carries the form fields
    ('text_tags', 'notes', 'principal', 'priority', 'come_from').
    Records a PHistory entry describing the changes and, when the priority
    changes, propagates it to the related synthesis compounds (SCompound),
    each with its own SHistory entry. Redirects back when done.
    """
    # Project name is the first path segment of the URL.
    pname = request.environ['PATH_INFO'].split('/')[1]
    pid = int(args[0])
    userid = request.identity['repoze.who.userid']
    pcompound = DBSession.query(PCompound).filter_by(id=pid).join(PCompound.mol).filter(Compound.project.any(Projects.name==pname)).first()
    try:
        # 'text_tags' is a single tag id (string) or a list of ids.
        if isinstance(kw['text_tags'], basestring):
            tagi = [DBSession.query( Tags ).get(int(kw['text_tags']))]
        else:
            tagi = [DBSession.query( Tags ).get(int(id)) for id in kw['text_tags']]
    except Exception as msg:
        flash(l_(u'Tags error: %s' %msg))
        redirect(request.headers['Referer'])
    # 'notes' is optional; missing key simply means "no notes change".
    notes = kw.get('notes')
    pchanges = u''
    # dict.has_key() is Python-2-only and deprecated; 'in' is equivalent.
    if kw and 'principal' in kw:
        if kw['principal'] != pcompound.principal:
            pcompound.principal = kw['principal']
            pchanges += u' Odbiorca: ' + kw['principal'] + u';'
    if notes and notes != pcompound.notes:
        pcompound.notes = notes
        pchanges += u' Notes: ' + notes + u';'
    pchanges += u' Tags: '
    if tagi != pcompound.mol.tags:
        for tag in tagi:
            # Bug fix: str(tag.name) raised UnicodeEncodeError for
            # non-ASCII tag names under Python 2; unicode() is safe when
            # concatenating into the unicode pchanges buffer.
            pchanges += unicode(tag.name) + '; '
        pcompound.mol.tags = tagi
    if int(kw['priority']) != pcompound.priority:
        pchanges += u' Priorytet' + str(kw['priority']) + u';'
        pcompound.priority = int(kw['priority'])
        # Keep the synthesis-side copies of the priority in sync and log
        # the change on each synthesis compound.
        scompound = DBSession.query(SCompound).filter_by(pid=pid).all()
        if scompound:
            for sc in scompound:
                sc.priority = int(kw['priority'])
                shistory = SHistory()
                shistory.project = pname
                shistory.date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                shistory.user = userid
                shistory.status = u'Priority'
                shistory.changes = u'Priority:' + kw['priority']
                sc.history += [shistory]
                DBSession.add(shistory)
    phistory = PHistory()
    phistory.project = pname
    phistory.date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    phistory.user = userid
    phistory.status = u'Edycja'
    phistory.changes = pchanges
    phistory.gid = pcompound.gid
    pcompound.history += [phistory]
    DBSession.add(phistory)
    DBSession.flush()
    if kw and 'come_from' in kw:
        come_from = kw['come_from']
    else:
        come_from = request.headers['Referer']
    flash(l_(u'Task completed successfully'))
    redirect(come_from)
@expose()
def reject(self, *args, **kw):
    """
    Chemist rejection of synthesis (allowed if synthesis status <= 2 and the
    caller is the synthesis owner, or a manager).

    Changes the status of each related synthesis compound to rejected and
    sets the request compound status to cancelled, logging an SHistory and
    a PHistory entry for each change. Always ends in a redirect.
    """
    # Project name is the first path segment of the URL.
    pname = request.environ['PATH_INFO'].split('/')[1]
    userid = request.identity['repoze.who.userid']
    try:
        come_from = request.headers['Referer']
    except Exception:
        # Direct access without a referrer: fall back to the current URL.
        come_from = request.path_url
    if args:
        for arg in args:
            try:
                pcompound = DBSession.query(PCompound).join(PCompound.mol).filter(Compound.project.any(Projects.name==pname)).filter(PCompound.id==int(arg)).first()
            except Exception:
                flash(l_(u'Compound number error'), 'error')
                redirect(come_from)
            if pcompound:
                scompounds = DBSession.query(SCompound).join(SCompound.mol).filter(Compound.project.any(Projects.name==pname)).filter(SCompound.pid==pcompound.id).all()
                if scompounds:
                    for scompound in scompounds:
                        # NOTE(review): if has_permission() returns a TG2
                        # predicate object it is always truthy, which would
                        # bypass the owner check for everyone -- confirm it
                        # returns a bool here.
                        if scompound.status_id <= 2 and (scompound.owner == userid or has_permission('kierownik')):
                            shistory = SHistory()
                            shistory.gid = scompound.mol.gid
                            shistory.project = pname
                            shistory.date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                            shistory.user = userid
                            shistory.status = u'Reject'
                            shistory.changes = u'Reject synthesis compound of GID %s (ID projektowe %s)' % (scompound.gid, arg)
                            # status id 5: rejected state (matches the
                            # u'Reject' history label) -- confirm vs SStatus.
                            scompound.status = DBSession.query(SStatus).get(5)
                            scompound.history += [shistory]
                            DBSession.add(shistory)
                phistory = PHistory()
                phistory.gid = pcompound.gid
                phistory.project = pname
                phistory.date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                phistory.user = userid
                phistory.status = u'Reject'
                phistory.changes = u'Reject requests compound of GID %s (request ID %s)' % (pcompound.gid, pcompound.id)
                # status id 3: cancelled request -- confirm vs PStatus.
                pcompound.status = DBSession.query(PStatus).get(3)
                pcompound.history += [phistory]
                DBSession.add(phistory)
            else:
                flash(l_(u'Request compound error'), 'error')
                redirect(come_from)
        DBSession.flush()
        flash(l_(u'Task completed successfully'))
        redirect(come_from)
    else:
        flash(l_(u'Select Compounds'), 'error')
        redirect(come_from)
@expose()
def withdraw(self, *args, **kw):
    """
    Manager withdrawal of synthesis (allowed if synthesis status <= 2).

    Changes the status of each related synthesis compound to discontinued
    and sets the request compound status to cancelled, logging history
    entries. Restricted to users with the 'kierownik' (manager) permission.
    """
    pname = request.environ['PATH_INFO'].split('/')[1]
    userid = request.identity['repoze.who.userid']
    try:
        come_from = request.headers['Referer']
    except Exception:
        # Direct access without a referrer: fall back to the current URL.
        come_from = request.path_url
    # NOTE(review): if has_permission() returns a TG2 predicate object it is
    # always truthy, so this gate would not actually restrict access --
    # confirm it returns a bool (or use .is_met(request.environ)).
    if has_permission('kierownik'):
        if args:
            for arg in args:
                try:
                    pcompound = DBSession.query(PCompound).join(PCompound.mol).filter(Compound.project.any(Projects.name==pname)).filter(PCompound.id==int(arg)).first()
                except Exception:
                    flash(l_(u'Compound number error'), 'error')
                    redirect(come_from)
                if pcompound:
                    scompounds = DBSession.query(SCompound).join(SCompound.mol).filter(Compound.project.any(Projects.name==pname)).filter(SCompound.pid==pcompound.id).all()
                    if scompounds:
                        for scompound in scompounds:
                            if scompound.status_id <= 2:
                                shistory = SHistory()
                                shistory.gid = scompound.mol.gid
                                shistory.project = pname
                                shistory.date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                                shistory.user = userid
                                shistory.status = u'Withdraw'
                                shistory.changes = u'Withdraw synthesis compound of GID %s (ID projektowe %s)' % (scompound.gid, arg)
                                # status id 9: discontinued synthesis --
                                # confirm vs SStatus table.
                                scompound.status = DBSession.query(SStatus).get(9)
                                scompound.history += [shistory]
                                DBSession.add(shistory)
                    phistory = PHistory()
                    phistory.gid = pcompound.gid
                    phistory.project = pname
                    phistory.date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                    phistory.user = userid
                    phistory.status = u'Withdraw'
                    phistory.changes = u'Withdraw requests compound of GID %s (request ID %s)' % (pcompound.gid, pcompound.id)
                    # status id 3: cancelled request -- confirm vs PStatus.
                    pcompound.status = DBSession.query(PStatus).get(3)
                    pcompound.history += [phistory]
                    DBSession.add(phistory)
                else:
                    flash(l_(u'Request compound error'), 'error')
                    redirect(come_from)
            DBSession.flush()
            flash(l_(u'Task completed successfully'))
            redirect(come_from)
        else:
            flash(l_(u'Select Compounds'), 'error')
            redirect(come_from)
    else:
        flash(l_(u'Permission denied'), 'error')
        redirect(come_from)
@expose()
def post_delete(self, *args, **kw):
    """Delete a request compound (PCompound) and log the deletion.

    Compounds that are already accepted (PStatus id 2) cannot be deleted.
    Always redirects back to the referring page.
    """
    pname = request.environ['PATH_INFO'].split('/')[1]
    pid = int(args[0])
    pcompound = DBSession.query(PCompound).filter_by(id=pid).join(PCompound.mol).filter(Compound.project.any(Projects.name==pname)).first()
    userid = request.identity['repoze.who.userid']
    if pcompound.status != DBSession.query(PStatus).get(2):
        # Record the deletion in the project history before removing the row.
        phistory = PHistory()
        phistory.gid = pcompound.gid
        phistory.user = userid
        phistory.project = pname
        phistory.date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        phistory.status = 'Usuwanie'
        phistory.changes = 'Usunięcie związku o GID %s (ID projektowe %s) z tabeli projektowej' % (pcompound.gid, pid)
        DBSession.delete(pcompound)
        DBSession.add(phistory)
        DBSession.flush()
        flash(l_(u'Task completed successfully'))
    else:
        flash(l_(u'Permission denied'), 'error')  # accepted compounds cannot be deleted
    redirect(request.headers['Referer'])
@expose("molgears.templates.users.select.accept")
def accept(self, id):
pname = request.environ['PATH_INFO'].split('/')[1]
pid = int(id)
pcompound = DBSession.query( PCompound ).filter_by(id=pid).join(PCompound.mol).filter(Compound.project.any(Projects.name==pname)).first()
principals = DBSession.query (Group).get(3)
come_from = request.headers['Referer']
if pcompound.status_id !=1:
flash(l_(u'Status error. Permission denied'), 'error')
alltags =[tag for tag in DBSession.query(Tags).order_by('name').all() ]
try:
tags = [tag for tag in pcompound.mol.tags]
except Exception:
tags = [pcompound.mol.tags]
pass
return dict(pcompound=pcompound, alltags=alltags, tags=tags, users=principals.users, default_user = pcompound.principal, come_from=come_from, page='select', pname=pname)
@expose()
def add_to_synthesis(self, id, **kw):
pname = request.environ['PATH_INFO'].split('/')[1]
pid = int(id)
try:
etap_max = int(kw['etap_max'])
if isinstance(kw['text_tags'], basestring):
| |
# -*- coding: utf-8 -*-
# This is an auxiliary class of the neuralflow package
"""This is a part of the neuralflow package/EnergyModel class.
Solves the Sturm-Liouville problem:
(D*p(x)y')'+q(x)y=lambda w(x) y
with specified boundary conditions.
Also performs additional EVP solving to find the eigenvalues and eigenvectors of H operator.
"""
import numpy as np, numpy.matlib
import numbers
from scipy import sparse, linalg
from numpy.polynomial import legendre
from .rank_nullspace import nullspace
from functools import reduce
from itertools import combinations
from operator import mul
MACHINE_EPSILON = np.finfo(np.double).eps
class PDESolve:
"""Numerical solution of Stourm-Liouville problem
Parameters
----------
xbegin : float
The left boundary of the latent state. The default is -1.
xend : float
The right boundary of the latent state. The default is 1.
method : dictionary
A dictionary that contains 2 keys:
name : string
Specifies the method for the numerical solution of EV problem, can be either 'FD' or 'SEM' (forward differences or spectral elements method).
gridsize : dictionary
Specifies number of grid size points N for 'FD' method, or Np and Ne for 'SEM' method (Ne is the number of elements, Np is the number of grid points in each element).
The default is {'name': 'SEM', 'gridsize': {'Np': 8, 'Ne': 256}}.
BoundCond : dictionary
A dictionary that specifies boundary conditions (Dirichlet, Neumann or Robin).
The default is {'leftB': 'Neumann', 'rightB': 'Neumann'}
grid_mode_calc : str
Specify how to calculate SEM grid collocation points.
Availiable options:
'built_in': using numpy.polynomial module
'newton': using Newton's method to calculate zeros of the Legendre polynomial for the GLL grid
The default is 'newton'.
BC_method : str
Specify the method of boundary condition handling when transforming the EV problem into linear system of equations.
Availiable options:
'projection': use projection operator.
'bound_subst': use boundary condition substitution into the first and the last equations of the associated linear system.
The default is 'projection'
int_mode : str
Specify the integration mode.
Availiable options:
'full' - use full integration matrix.
'sparse' - use sparse integration matrix with bias.
The default is 'full'. See Supplementary Materials 2.3 from <NAME>, <NAME>, Nat Mach Intell 2, 674–683 (2020) for details.
Attributes
----------
AD_d : numpy array (N,N), dtype=float
Integration matrix (only for SEM method).
dmat_d : numpy array (N,N), dtype=float
Differentiation matrix (only for SEM method).
dx : float
Uniform grid step size (only for FD method).
N : int
A total number of the grid points.
Np : int
Degree of each element (number of grid points in each element, only for SEM method).
Ne : int
A number of SEM elements (only for SEM method).
w_d : numpy array (N,), dtype=float
Weights of the nodes (on the global grid).
x_d: numpy array (N,), dtype=float
Domain grid points.
Hidden attributes
-----------------
AD_ : numpy array (Np,Np), dtype=float
Integration matrix on a single element in local frame (only for SEM method).
dmat_ : numpy array (Np,Np), dtype=float
Differentiation matrix on a single element in local frame (only for SEM method).
BC_ : numpy array (1,4), dtype=float
Representation of boundary condition with four values consistent with to:
BC_[0]*y[xbegin]+BC_[1]*y'[xbegin]=0
BC_[2]*y[xend]+BC_[3]*y'[xend]=0
ele_scale_ : float
Scaling coefficient for each element (only for SEM method)
massmat_full_ : numpy array (N,N), dtype=float
Full mass matrix
massmat_red_ : numpy array, dtype=float
Reduced mass matrix of the same size as stiffmat_red_
stiffmat_full_ : numpy array (N,N), dtype=float
Full stiffness matrix
stiffmat_red_ : numpy array, dtype=float
preallocated stiffness matrix with possibly reduced size,
due to throughing away of some of the equations, or projection onto Nullspace of Boundary operator
x_ : numpy array (Np,), dtype=float
Grid on a single element in local frame (only for SEM method)
w_ : numpy array (Np,), dtype=float
Weights on a single element in local frame (only for SEM method)
Null_M_ : numpy array
Nullspace of boundary operator (only for SEM method and 'projection' BC_method)
Methods:
--------
solve_EV : solves the eigenvalue problem for specified
functions peq(x), q(x), w(x), D in a chosen mode
Hidden functions and methods
----------------------------
_check_and_set_params : checks input parameters and sets grid dimensions
called upon initialization
_get_grid : calculates grid.
called upon initialization
_get_single_element : calculates local grid with numpy.polinomial functions
called by _get_grid
__get_single_element_numerics : calculates local grid with Newton method
called by _get_grid
_get_matrices : preallocates full and reduced stiffness and mass matrices
_set_AD_mat : calculate antiderivative matrix
_setmat : calculates stiffnes and mass matrices
called by solve_EV
"""
# List of availible methods
_MethodList = ['FD', 'SEM']
_grid_mode_calcList = ['built_in', 'newton']
_BC_methodList = ['projection', 'bound_subst']
def __init__(self, xbegin=-1.0, xend=1.0, method=None,
             BoundCond=None, grid_mode_calc='newton',
             BC_method='projection', int_mode='full'):
    """Set up the solver: validate parameters, build the grid, and
    preallocate the matrices used by solve_EV.

    Parameters mirror the class docstring. ``method`` defaults to
    {'name': 'SEM', 'gridsize': {'Np': 8, 'Ne': 256}} and ``BoundCond``
    defaults to {'leftB': 'Neumann', 'rightB': 'Neumann'}.
    """
    # Bug fix: the dict defaults used to be mutable default arguments,
    # shared across every instance; build them per-instance instead so
    # mutating one solver's configuration cannot affect another's.
    if method is None:
        method = {'name': 'SEM', 'gridsize': {'Np': 8, 'Ne': 256}}
    if BoundCond is None:
        BoundCond = {'leftB': 'Neumann', 'rightB': 'Neumann'}
    self.xbegin = xbegin
    self.xend = xend
    self.method = method
    self.BoundCond = BoundCond
    self.grid_mode_calc = grid_mode_calc
    self.BC_method = BC_method
    self.int_mode = int_mode
    # Assert inputs and set grid parameters: N, (Np, Ne)
    self._check_and_set_params()
    # Convert given boundary condition into a vector BC_ of size (1,4)
    self._get_BC()
    # Calculate grid, weights and (differentiation matrix)
    self._get_grid()
    # PreAllocate stiffness and mass matrices
    self._get_matrices()
    # Get the Nullspace
    self._get_Nullspace()
    # Calculate antiderivative matrix
    self._set_AD_mat()
def Integrate(self, f, result=None):
    """Take an indefinite integral of a function f using the integration
    matrix (and a cumulative correction for 'sparse' int_mode).

    Parameters
    ----------
    f : numpy array, dtype=float
        Function values evaluated on the grid.
    result : numpy array, dtype=float
        Optional output container (avoids an extra allocation). When
        given, the integral is written into it in place and this method
        returns None.

    Returns
    -------
    numpy array or None
        The integral values if ``result`` is None, otherwise None.
    """
    if result is None:
        if self.int_mode == 'full':
            # Dense antiderivative matrix: one matrix-vector product.
            return self.AD_d.dot(f)
        elif self.int_mode == 'sparse':
            # Sparse mode integrates per element; add the running sum of
            # the preceding elements' boundary values (stride Np-1 walks
            # the element boundaries) as a per-element bias.
            return self.AD_d.dot(f) + np.append([0], np.repeat(np.cumsum(self.AD_d.dot(f)[0:-1:self.Np - 1]), self.Np - 1))
    else:
        if self.int_mode == 'full':
            # assumes AD_d supports dot(..., out=...) (dense ndarray) --
            # a scipy.sparse matrix would not; confirm.
            self.AD_d.dot(f, out=result)
        elif self.int_mode == 'sparse':
            self.AD_d.dot(f, out=result)
            # Same cumulative bias as above, applied in place.
            result += np.append([0], np.repeat(
                np.cumsum(result[0:-1:self.Np - 1]), self.Np - 1))
def set_BoundCond(self, BoundCond):
    """Set new boundary conditions for the Sturm-Liouville problem.

    Parameters
    ----------
    BoundCond : dictionary
        Specify boundary conditions
        keys : 'leftB', 'rightB', (optionally: 'leftBCoeff', 'rightBCoeff')
        values : 'Dirichlet', 'Neumann' or 'Robin'. If 'Robin', additionally
        specify coefficients as a dictionary with two keys 'c1' and 'c2',
        consistent with the boundary condition of the form:
        c1*y(B)+c2*y'(B)=0
        Example: {'leftB': 'Robin', 'leftBCoeff': {'c1': 1, 'c2': 2},
                  'rightB': 'Robin', 'rightBCoeff': {'c1': 3, 'c2': 4}}
    """
    # Check parameters, set the new boundary conditions, and recompute the
    # boundary vector and the Nullspace projector that depend on them.
    self.BoundCond = BoundCond
    self._check_and_set_params()
    self._get_BC()
    self._get_Nullspace()
def solve_EV(self, peq=None, D=1, q=None, w=None, mode='hdark', fr=None, Nv=64):
"""Solve the Sturm-Liouville eigenvalue-eigenvector problem.
The problem can be specified either by peq, q and w functions or by the precalculated stiffmat and massmat
Parameters
----------
peq : numpy array, dtype=float
Equilibirum probabilioty distribution that determines potential Phi(x), see Suplementary Note 1.1. 1D array.
D : float
Noise magnitude.
q : numpy array, dtype=float
A function q(x) in the S-L problem. The default value is None, in this case q(x)=0
w : numpy array, dtype=float
A function w(x) in the S-L problem (non-negative). The default is None, in this case w(x)=1
mode : str
Specify mode. Availiable modes:
'normal': solve Sturm-Liouville problem, ignore D and fr.
'h0': solve for eigenvalues and vectors of FP operator H0.
'hdark': solve for eigenvalues and vector of FP and H operator
The default is 'hdark'.
fr : numpy array
The firing rate function (required for 'hdark' mode).
This firing rate function is an elementwise sum of the firing rate functions of all the neuronal responses.
The default is None.
Nv : int
A number of eigenvectors/eigenvalues returned. The default is 64.
Returns
-------
lQ : numpy array (Nv,), dtype=float
The least Nv eigenvalues for the eigenvalue problem of H0 operator.
QxOrig : numpy array (Nv,Nv), dtype=float
The corresponding scaled eigenvectors
Qx : numpy array (Nv,Nv), dtype=float
The eigenvectors of EV problem of H0 operator | |
with each position) is incorrect
and will produce errors
"""
"*** YOUR CODE HERE ***"
self.particles = []
signal = False
while not signal:
# loop for each position
for item in self.legalPositions:
if len(self.particles) != self.numParticles:
self.particles.append(item)
else:
signal = True
break
def observe(self, observation, gameState):
    """
    Update beliefs based on the given distance observation.

    Two special cases are handled:
    1) A noisyDistance of None means the ghost was captured: every
       particle is moved to the jail position.
    2) If every position receives zero weight after reweighting, the
       particles are recreated from the prior via initializeUniformly.
    Otherwise the particles are resampled from the reweighted belief.
    """
    noisyDistance = observation
    emissionModel = busters.getObservationDistribution(noisyDistance)
    pacmanPosition = gameState.getPacmanPosition()
    if noisyDistance is None:
        # Captured ghost: all particles collapse onto the jail cell.
        jail = self.getJailPosition()
        self.particles = [jail for _ in range(self.numParticles)]
    else:
        # Reweight the current belief by the observation likelihood.
        weights = util.Counter()
        prior = self.getBeliefDistribution()
        for position in self.legalPositions:
            trueDistance = util.manhattanDistance(pacmanPosition, position)
            weights[position] = emissionModel[trueDistance] * prior[position]
        weights.normalize()
        if any(w != 0 for w in weights.values()):
            # Resample with replacement from the posterior.
            self.particles = [util.sample(weights) for _ in range(self.numParticles)]
        else:
            # Every position got zero weight: fall back to the prior.
            self.initializeUniformly(gameState)
def elapseTime(self, gameState):
    """
    Advance every particle one time step.

    Each particle's successor is sampled from the ghost's transition
    model, obtained via getPositionDistribution on a gameState in which
    the ghost has been placed at the particle's current position.
    """
    self.particles = [
        util.sample(self.getPositionDistribution(self.setGhostPosition(gameState, oldPos)))
        for oldPos in self.particles
    ]
def getBeliefDistribution(self):
    """
    Convert the particle list into a belief distribution: a util.Counter
    mapping each position to the fraction of particles located there.
    """
    beliefs = util.Counter()
    for position in self.particles:
        beliefs[position] += 1
    total = float(self.numParticles)
    for position in beliefs:
        beliefs[position] /= total
    return beliefs
class MarginalInference(InferenceModule):
    "A wrapper around the JointInference module that returns marginal beliefs about ghosts."

    def initializeUniformly(self, gameState):
        "Set the belief state to an initial, prior value."
        # Only the wrapper for ghost index 1 initializes the shared joint
        # module; every wrapper registers its own ghost agent with it.
        if self.index == 1: jointInference.initialize(gameState, self.legalPositions)
        jointInference.addGhostAgent(self.ghostAgent)

    def observeState(self, gameState):
        "Update beliefs based on the given distance observation and gameState."
        # Delegate once per turn (via ghost index 1) to the joint filter.
        if self.index == 1: jointInference.observeState(gameState)

    def elapseTime(self, gameState):
        "Update beliefs for a time step elapsing from a gameState."
        if self.index == 1: jointInference.elapseTime(gameState)

    def getBeliefDistribution(self):
        "Returns the marginal belief over a particular ghost by summing out the others."
        jointDistribution = jointInference.getBeliefDistribution()
        dist = util.Counter()
        # Marginalize: sum the joint probability over all positions of the
        # other ghosts; t is a tuple of ghost positions indexed from 0.
        for t, prob in jointDistribution.items():
            dist[t[self.index - 1]] += prob
        return dist
class JointParticleFilter:
"JointParticleFilter tracks a joint distribution over tuples of all ghost positions."
def __init__(self, numParticles=600):
    # Delegate so the particle count can also be changed after construction.
    self.setNumParticles(numParticles)
def setNumParticles(self, numParticles):
    # Number of joint particles maintained by the filter.
    self.numParticles = numParticles
def initialize(self, gameState, legalPositions):
    "Stores information about the game, then initializes particles."
    # Every agent except Pacman (agent 0) is a ghost.
    self.numGhosts = gameState.getNumAgents() - 1
    self.ghostAgents = []
    self.legalPositions = legalPositions
    self.initializeParticles()
def initializeParticles(self):
"""
Initialize particles to be consistent with a uniform prior.
Each particle is a tuple of ghost positions. Use self.numParticles for
the number of particles. You may find the python package 'itertools' helpful.
Specifically, you will need to think about permutations of legal ghost
positions, with the additional understanding that ghosts may occupy the
same space. Look at the 'product' function in itertools to get an
implementation of the catesian product. Note: If you use
itertools, keep in mind that permutations are not returned in a random order;
you must shuffle the list of permutations in order to ensure even placement
of particles across the board. Use self.legalPositions to obtain a list of
positions a ghost may occupy.
** NOTE **
the variable you store your particles in must be a list; a list is simply a collection
of unweighted variables (positions in this case). Storing your particles as a Counter or
dictionary (where there could be an associated weight with each position) is incorrect
and will produce errors
"""
"*** YOUR CODE HERE ***"
# same as prev init but with cartesianProduct
self.particles = []
positions = []
cartesianProduct = itertools.product(self.legalPositions, self.legalPositions)
for item in cartesianProduct:
positions.append(item)
signal = False
while signal == False:
for item in positions:
if len(self.particles) == self.numParticles:
signal = True
break
self.particles.append(item)
def addGhostAgent(self, agent):
    "Each ghost agent is registered separately and stored (in case they are different)."
    # Registration order defines the ghost index used elsewhere
    # (e.g. self.ghostAgents[i] in elapseTime).
    self.ghostAgents.append(agent)
def getJailPosition(self, i):
return (2 * i + 1, 1);
def observeState(self, gameState):
    """
    Resamples the set of particles using the likelihood of the noisy
    observations.

    Handles the two special cases from the spec:
    1) A captured ghost (noisyDistance of None) is placed in its jail
       cell, self.getJailPosition(i), in every particle.
    2) If every particle receives zero weight, particles are recreated
       from the prior via initializeParticles(), and any captured ghosts
       are then moved to jail in the fresh particles as well.
    """
    pacmanPosition = gameState.getPacmanPosition()
    noisyDistances = gameState.getNoisyGhostDistances()
    if len(noisyDistances) < self.numGhosts: return
    emissionModels = [busters.getObservationDistribution(dist) for dist in noisyDistances]
    # Weight every particle by the joint likelihood of all observations.
    allPossible = util.Counter()
    for particle in self.particles:
        prob = 1.0
        for i in range(self.numGhosts):
            if noisyDistances[i] is not None:
                distance = util.manhattanDistance(pacmanPosition, particle[i])
                prob *= emissionModels[i][distance]
            else:
                # Captured ghost: force it into jail for this particle.
                particle = self.getParticleWithGhostInJail(particle, i)
        allPossible[particle] += prob
    allPossible.normalize()
    # normalize() leaves the counter untouched when the total is zero,
    # so a zero totalCount means every particle got zero weight.
    if allPossible.totalCount() == 0:
        # Re-seed from the uniform prior...
        self.initializeParticles()
        # ...and (bug fix) re-apply the jailing of captured ghosts, as the
        # spec requires: fresh particles place them at random positions.
        for i in range(self.numGhosts):
            if noisyDistances[i] is None:
                self.particles = [self.getParticleWithGhostInJail(p, i) for p in self.particles]
    else:
        # Resample with replacement proportionally to the weights.
        self.particles = [util.sample(allPossible) for _ in range(self.numParticles)]
def getParticleWithGhostInJail(self, particle, ghostIndex):
particle = list(particle)
particle[ghostIndex] = self.getJailPosition(ghostIndex)
return tuple(particle)
def elapseTime(self, gameState):
"""
Samples each particle's next state based on its current state and the gameState.
To loop over the ghosts, use:
for i in range(self.numGhosts):
...
Then, assuming that "i" refers to the index of the
ghost, to obtain the distributions over new positions for that
single ghost, given the list (prevGhostPositions) of previous
positions of ALL of the ghosts, use this line of code:
newPosDist = getPositionDistributionForGhost(setGhostPositions(gameState, prevGhostPositions),
i, self.ghostAgents[i])
**Note** that you may need to replace "prevGhostPositions" with the
correct name of the variable that you have used to refer | |
# coding: utf-8
"""
FeersumNLU API
This is the HTTP API for Feersum NLU. See https://github.com/praekelt/feersum-nlu-api-wrappers for examples of how to use the API. # noqa: E501
OpenAPI spec version: 2.0.54.dev2
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from feersum_nlu.api_client import ApiClient
class ImageDatasetsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def image_dataset_add_samples(self, instance_name, labelled_image_sample_list, **kwargs):  # noqa: E501
    """Add samples. Image format is 256x256 RGB; jpeg encoding at quality 50 is suggested. # noqa: E501

    Add samples to named image dataset. Returns the samples added to the
    instance. Synchronous by default; pass async_req=True to receive the
    request thread instead:

    >>> thread = api.image_dataset_add_samples(instance_name, labelled_image_sample_list, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str instance_name: The name of the instance. (required)
    :param list[LabelledImageSample] labelled_image_sample_list: List of labelled image samples. (required)
    :param str x_caller:
    :return: list[LabelledImageSample]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Both the sync and async paths simply return whatever the
    # *_with_http_info helper yields (data or a thread), so a single
    # delegation is equivalent to the original branching.
    kwargs['_return_http_data_only'] = True
    return self.image_dataset_add_samples_with_http_info(instance_name, labelled_image_sample_list, **kwargs)  # noqa: E501
def image_dataset_add_samples_with_http_info(self, instance_name, labelled_image_sample_list, **kwargs):  # noqa: E501
    """Add samples. Image format is 256x256 RGB; jpeg encoding at quality 50 is suggested. # noqa: E501

    Add samples to named image dataset. Returns the samples added to the instance. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.image_dataset_add_samples_with_http_info(instance_name, labelled_image_sample_list, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str instance_name: The name of the instance. (required)
    :param list[LabelledImageSample] labelled_image_sample_list: List of labelled image samples. (required)
    :param str x_caller:
    :return: list[LabelledImageSample]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Parameters accepted by this endpoint plus the generic transport knobs.
    all_params = ['instance_name', 'labelled_image_sample_list', 'x_caller']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Reject any keyword argument the endpoint does not understand.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method image_dataset_add_samples" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'instance_name' is set
    if ('instance_name' not in params or
            params['instance_name'] is None):
        raise ValueError("Missing the required parameter `instance_name` when calling `image_dataset_add_samples`")  # noqa: E501
    # verify the required parameter 'labelled_image_sample_list' is set
    if ('labelled_image_sample_list' not in params or
            params['labelled_image_sample_list'] is None):
        raise ValueError("Missing the required parameter `labelled_image_sample_list` when calling `image_dataset_add_samples`")  # noqa: E501

    collection_formats = {}

    # The instance name is interpolated into the URL path.
    path_params = {}
    if 'instance_name' in params:
        path_params['instance_name'] = params['instance_name']  # noqa: E501

    query_params = []

    # Optional caller-identification header.
    header_params = {}
    if 'x_caller' in params:
        header_params['X-CALLER'] = params['x_caller']  # noqa: E501

    form_params = []
    local_var_files = {}

    # The sample list is sent as the JSON request body.
    body_params = None
    if 'labelled_image_sample_list' in params:
        body_params = params['labelled_image_sample_list']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['APIKeyHeader', 'APIKeyHeader_old']  # noqa: E501

    return self.api_client.call_api(
        '/vision/v2/image_datasets/{instance_name}/samples', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[LabelledImageSample]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def image_dataset_create(self, create_details, **kwargs):  # noqa: E501
    """Create an image dataset.  # noqa: E501

    Create a new image dataset or reload one from the trash. Returns the
    details of the instance.

    This method is synchronous by default; pass ``async_req=True`` to
    receive a request thread instead.

    >>> thread = api.image_dataset_create(create_details, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param ImageDatasetCreateDetails create_details: The details of the instance to create. (required)
    :param str x_caller:
    :return: ImageDatasetInstanceDetail
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience wrapper always unwraps the payload (no
    # (data, status, headers) triple).
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous: hand the thread straight back to the caller.
        return self.image_dataset_create_with_http_info(create_details, **kwargs)  # noqa: E501
    # Synchronous: return the response data directly.
    return self.image_dataset_create_with_http_info(create_details, **kwargs)  # noqa: E501
def image_dataset_create_with_http_info(self, create_details, **kwargs):  # noqa: E501
    """Create an image dataset.  # noqa: E501

    Create a new image dataset or reload one from the trash. Returns the
    details of the instance.

    This method is synchronous by default; pass ``async_req=True`` to
    receive a request thread instead.

    >>> thread = api.image_dataset_create_with_http_info(create_details, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param ImageDatasetCreateDetails create_details: The details of the instance to create. (required)
    :param str x_caller:
    :return: ImageDatasetInstanceDetail
        If the method is called asynchronously,
        returns the request thread.
    """
    # Reject unknown keyword arguments up front.
    all_params = {'create_details', 'x_caller', 'async_req',
                  '_return_http_data_only', '_preload_content',
                  '_request_timeout'}
    params = {'create_details': create_details}
    for key, val in six.iteritems(kwargs):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method image_dataset_create" % key
            )
        params[key] = val
    # verify the required parameter 'create_details' is set
    if params.get('create_details') is None:
        raise ValueError("Missing the required parameter `create_details` when calling `image_dataset_create`")  # noqa: E501

    header_params = {}
    if 'x_caller' in params:
        header_params['X-CALLER'] = params['x_caller']  # noqa: E501
    # Negotiate JSON in both directions.
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    return self.api_client.call_api(
        '/vision/v2/image_datasets', 'POST',
        {},                     # no path parameters
        [],                     # no query parameters
        header_params,
        body=params['create_details'],
        post_params=[],
        files={},
        response_type='ImageDatasetInstanceDetail',  # noqa: E501
        auth_settings=['APIKeyHeader', 'APIKeyHeader_old'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def image_dataset_del(self, instance_name, **kwargs):  # noqa: E501
    """Delete named instance.  # noqa: E501

    Delete and return the details of the named image dataset instance.
    Deleted instances can be reloaded from the trash with the create
    operation.

    This method is synchronous by default; pass ``async_req=True`` to
    receive a request thread instead.

    >>> thread = api.image_dataset_del(instance_name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str instance_name: The name of the instance. (required)
    :param str x_caller:
    :return: ImageDatasetInstanceDetail
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience wrapper always unwraps the payload.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous: return the request thread.
        return self.image_dataset_del_with_http_info(instance_name, **kwargs)  # noqa: E501
    # Synchronous: return the response data directly.
    return self.image_dataset_del_with_http_info(instance_name, **kwargs)  # noqa: E501
def image_dataset_del_with_http_info(self, instance_name, **kwargs):  # noqa: E501
    """Delete named instance.  # noqa: E501

    Delete and return the details of the named image dataset instance.
    Deleted instances can be reloaded from the trash with the create
    operation.

    This method is synchronous by default; pass ``async_req=True`` to
    receive a request thread instead.

    >>> thread = api.image_dataset_del_with_http_info(instance_name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str instance_name: The name of the instance. (required)
    :param str x_caller:
    :return: ImageDatasetInstanceDetail
        If the method is called asynchronously,
        returns the request thread.
    """
    # Reject unknown keyword arguments up front.
    all_params = {'instance_name', 'x_caller', 'async_req',
                  '_return_http_data_only', '_preload_content',
                  '_request_timeout'}
    params = {'instance_name': instance_name}
    for key, val in six.iteritems(kwargs):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method image_dataset_del" % key
            )
        params[key] = val
    # verify the required parameter 'instance_name' is set
    if params.get('instance_name') is None:
        raise ValueError("Missing the required parameter `instance_name` when calling `image_dataset_del`")  # noqa: E501

    path_params = {'instance_name': params['instance_name']}  # noqa: E501
    header_params = {}
    if 'x_caller' in params:
        header_params['X-CALLER'] = params['x_caller']  # noqa: E501
    # Negotiate JSON in both directions.
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    return self.api_client.call_api(
        '/vision/v2/image_datasets/{instance_name}', 'DELETE',
        path_params,
        [],                     # no query parameters
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='ImageDatasetInstanceDetail',  # noqa: E501
        auth_settings=['APIKeyHeader', 'APIKeyHeader_old'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def | |
country code of the locale.
:param str variant: The variant of the locale.
"""
self.locales[language_code] = (country_code, variant)
def add_property(self, key, value):
    """Append a document-level property.

    :param str key: Key of the property.
    :param str value: Value of the property.
    """
    # Properties are stored as an ordered list of (key, value) pairs.
    self.properties += [(key, value)]
def add_ref_annotation(self, id_tier, tier2, time, value='',
                       prev=None, svg=None):
    """Add a reference annotation.

    .. note:: When a timepoint falls inside two annotations, the new
        reference annotation points at the first one found, so it is
        safest to pass the middle of the annotation you want to
        reference.

    :param str id_tier: Name of the tier.
    :param str tier2: Tier of the referenced annotation.
    :param int time: Time of the referenced annotation.
    :param str value: Value of the annotation.
    :param str prev: Id of the previous annotation.
    :param str svg: Svg reference.
    :raises KeyError: If the tier is non existent.
    :raises ValueError: If the tier already contains normal annotations
        or there is no annotation at ``time`` to reference.
    """
    # A tier may hold either aligned or reference annotations — never both.
    if self.tiers[id_tier][0]:
        raise ValueError('This tier already contains normal annotations.')
    # Find the first annotation on tier2 whose interval covers `time`.
    target = None
    for ref_aid, (ts_begin, ts_end, _, _) in self.tiers[tier2][0].items():
        if self.timeslots[ts_begin] <= time <= self.timeslots[ts_end]:
            target = ref_aid
            break
    if not target:
        raise ValueError('There is no annotation to reference to.')
    new_aid = self.generate_annotation_id()
    self.annotations[new_aid] = id_tier
    self.tiers[id_tier][1][new_aid] = (target, value, prev, svg)
def add_secondary_linked_file(self, file_path, relpath=None, mimetype=None,
                              time_origin=None, assoc_with=None):
    """Register a secondary linked file.

    :param str file_path: Path of the file.
    :param str relpath: Relative path of the file.
    :param str mimetype: Mimetype of the file; when ``None`` it is
        guessed from the file extension, which currently only works for
        wav, mpg, mpeg and xml.
    :param int time_origin: Time origin for the media file.
    :param str assoc_with: Associated with field.
    :raises KeyError: If the mimetype had to be guessed and the
        extension is non standard or unknown.
    """
    if mimetype is None:
        # Guess from the extension via the class-level MIMES table.
        mimetype = self.MIMES[file_path.split('.')[-1]]
    descriptor = {
        'LINK_URL': file_path,
        'RELATIVE_LINK_URL': relpath,
        'MIME_TYPE': mimetype,
        'TIME_ORIGIN': time_origin,
        'ASSOCIATED_WITH': assoc_with,
    }
    self.linked_file_descriptors.append(descriptor)
def add_tier(self, tier_id, ling='default-lt', parent=None, locale=None,
             part=None, ann=None, language=None, tier_dict=None):
    """Add a tier.

    When no linguistic type is given and the default linguistic type is
    unavailable, the first available type (sorted) is used instead.

    :param str tier_id: Name of the tier.
    :param str ling: Linguistic type; silently replaced by the first
        available type when unknown.
    :param str parent: Parent tier name.
    :param str locale: Locale; ignored when not registered.
    :param str part: Participant.
    :param str ann: Annotator.
    :param str language: Language; ignored when not registered.
    :param dict tier_dict: TAG attributes; when not ``None`` all other
        options are ignored. Only use dictionaries coming from
        :func:`get_parameters_for_tier`.
    :raises ValueError: If the tier_id is empty.
    """
    if not tier_id:
        raise ValueError('Tier id is empty...')
    # Fall back to the first available linguistic type.
    if ling not in self.linguistic_types:
        ling = sorted(self.linguistic_types.keys())[0]
    # Unknown locales/languages are silently dropped.
    if locale and locale not in self.locales:
        locale = None
    if language and language not in self.languages:
        language = None
    attributes = tier_dict if tier_dict is not None else {
        'TIER_ID': tier_id,
        'LINGUISTIC_TYPE_REF': ling,
        'PARENT_REF': parent,
        'PARTICIPANT': part,
        'DEFAULT_LOCALE': locale,
        'LANG_REF': language,
        'ANNOTATOR': ann}
    # Tier tuple: (aligned annotations, reference annotations,
    # attributes, ordering index).
    self.tiers[tier_id] = ({}, {}, attributes, len(self.tiers))
def child_tiers_for(self, id_tier):
    """Deprecated alias kept for backwards compatibility.

    .. deprecated: 1.5

    Use :func:`get_child_tiers_for`
    """
    return self.get_child_tiers_for(id_tier)
def clean_time_slots(self):
    """Remove every timeslot that no aligned annotation refers to.

    .. warning:: This can and will take time for larger tiers. When you
        perform many operations on many tiers, unset the cleaning flags
        in those functions and clean once afterwards.
    """
    referenced = set()
    for tier in self.tiers.values():
        for annotation in tier[0].values():
            # annotation[0]/annotation[1] are the begin/end timeslot ids.
            referenced.add(annotation[0])
            referenced.add(annotation[1])
    # Symmetric difference: timeslots present but unreferenced.
    for stale in referenced ^ set(self.timeslots):
        del self.timeslots[stale]
def copy_tier(self, eaf_obj, tier_name):
    """Copy a tier into another :class:`pympi.Elan.Eaf` object.

    :param pympi.Elan.Eaf eaf_obj: Target Eaf object.
    :param str tier_name: Name of the tier.
    :raises KeyError: If the tier doesn't exist.
    """
    # Replace any pre-existing tier of the same name in the target.
    if tier_name in eaf_obj.get_tier_names():
        eaf_obj.remove_tier(tier_name)
    eaf_obj.add_tier(
        tier_name, tier_dict=self.get_parameters_for_tier(tier_name))
    for annotation in self.get_annotation_data_for_tier(tier_name):
        # (begin, end, value)
        eaf_obj.insert_annotation(tier_name, *annotation[:3])
def create_gaps_and_overlaps_tier(self, tier1, tier2, tier_name=None,
                                  maxlen=-1, fast=False):
    """Create a tier containing the gaps and overlaps of two tiers.

    For types see :func:`get_gaps_and_overlaps`.

    :param str tier1: Name of the first tier.
    :param str tier2: Name of the second tier.
    :param str tier_name: Name of the new tier; generated when ``None``.
    :param int maxlen: Maximum gap length (longer ones are skipped);
        ``-1`` disables the limit.
    :param bool fast: Use the fast gap/overlap algorithm.
    :returns: List of gaps and overlaps of the form
        ``[(start, end, type)]``.
    :raises KeyError: If a tier is non existent.
    :raises IndexError: If no annotations are available in the tiers.
    """
    if tier_name is None:
        tier_name = '{}_{}_ftos'.format(tier1, tier2)
    self.add_tier(tier_name)
    if fast:
        generator = self.get_gaps_and_overlaps2(tier1, tier2, maxlen)
    else:
        generator = self.get_gaps_and_overlaps(tier1, tier2, maxlen)
    ftos = []
    for fto in generator:
        ftos.append(fto)
        # Only annotate intervals of at least 1 ms.
        if fto[1] - fto[0] >= 1:
            self.add_annotation(tier_name, fto[0], fto[1], fto[2])
    self.clean_time_slots()
    return ftos
def extract(self, start, end):
    """Extract the selected time frame as a new object.

    :param int start: Start time.
    :param int end: End time.
    :returns: :class:`pympi.Elan.Eaf` object containing the extracted
        frame.
    """
    from copy import deepcopy
    eaf_out = deepcopy(self)
    for t in eaf_out.get_tier_names():
        for ab, ae, value in eaf_out.get_annotation_data_for_tier(t):
            if ab > end or ae < start:
                # Remove annotations that lie completely outside the
                # window by querying each at its own midpoint.
                # BUGFIX: the previous code used `(start-end)//2`, a
                # fixed negative time that never matches the annotation
                # being removed, so nothing was ever removed.
                eaf_out.remove_annotation(t, (ab + ae) // 2, False)
    eaf_out.clean_time_slots()
    return eaf_out
def filter_annotations(self, tier, tier_name=None, filtin=None,
                       filtex=None, regex=False, safe=False):
    """Filter annotations in a tier with inclusive/exclusive filters.

    :param str tier: Name of the tier.
    :param str tier_name: Name of the output tier; generated when
        ``None``.
    :param list filtin: Strings to include; ``None`` includes all.
    :param list filtex: Strings to exclude; ``None`` excludes none.
    :param bool regex: Treat the filters as regex matches.
    :param bool safe: Ignore zero length annotations (for possibly
        malformed data).
    :returns: Name of the created tier.
    :raises KeyError: If the tier is non existent.
    """
    if tier_name is None:
        tier_name = '{}_filter'.format(tier)
    self.add_tier(tier_name)
    if regex:
        matches = lambda pat, s: re.match(pat, s)
    else:
        matches = lambda pat, s: pat == s
    for begin, end, value in self.get_annotation_data_for_tier(tier):
        included = not filtin or any(matches(f, value) for f in filtin)
        excluded = filtex and any(matches(f, value) for f in filtex)
        if not included or excluded:
            continue
        if not safe or end > begin:
            self.add_annotation(tier_name, begin, end, value)
    self.clean_time_slots()
    return tier_name
def generate_annotation_id(self):
    """Generate the next annotation id of the form ``a<num>``.

    Mainly used internally.
    """
    if not self.maxaid:
        # Seed the counter from the highest existing *annotation* id.
        # BUGFIX: this previously scanned self.timeslots, whose ids
        # ('ts<num>') are an unrelated namespace — the counter could
        # collide with, or skip past, existing 'a<num>' ids.
        valid_anns = [int(''.join(filter(str.isdigit, a)))
                      for a in self.annotations]
        self.maxaid = max(valid_anns + [1]) + 1
    else:
        self.maxaid += 1
    return 'a{:d}'.format(self.maxaid)
def generate_ts_id(self, time=None):
    """Generate and register the next timeslot id (``ts<num>``).

    Mainly used internally.

    :param int time: Initial time to assign to the timeslot.
    :raises ValueError: If the time is negative.
    """
    if time and time < 0:
        raise ValueError('Time is negative...')
    if self.maxts:
        self.maxts += 1
    else:
        # Seed the counter from the highest existing timeslot id.
        numbers = [int(''.join(filter(str.isdigit, ts)))
                   for ts in self.timeslots]
        self.maxts = max(numbers + [1]) + 1
    new_ts = 'ts{:d}'.format(self.maxts)
    self.timeslots[new_ts] = time
    return new_ts
def get_annotation_data_at_time(self, id_tier, time):
    """Give the annotations at the given time.

    When the tier contains reference annotations those are returned
    instead; see :func:`get_ref_annotation_data_at_time` for the format.

    :param str id_tier: Name of the tier.
    :param int time: Time of the annotation.
    :returns: List of annotations at that time.
    :raises KeyError: If the tier is non existent.
    """
    # Reference tiers are delegated to the ref-annotation lookup.
    if self.tiers[id_tier][1]:
        return self.get_ref_annotation_at_time(id_tier, time)
    hits = []
    for ann in self.tiers[id_tier][0].values():
        begin = self.timeslots[ann[0]]
        end = self.timeslots[ann[1]]
        if begin <= time <= end:
            hits.append((begin, end, ann[2]))
    return sorted(hits)
def get_annotation_data_after_time(self, id_tier, time):
"""Give the annotation before a given time. When the tier contains
| |
<filename>tweakwcs/tests/test_linearfit.py<gh_stars>0
"""
A module containing unit tests for the `wcsutil` module.
Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
from itertools import product
import math
import pytest
import numpy as np
from tweakwcs import linearfit, linalg
# Sample sizes used by the fixtures below.
_LARGE_SAMPLE_SIZE = 1000
_SMALL_SAMPLE_SIZE = 10
# Fraction of points given zero weight ("bad" data) in weight_data.
_BAD_DATA_FRACTION = 0.2
# Maps a fit geometry name to the corresponding low-level fit function.
_TRANSFORM_SELECTOR = {
    'rscale': linearfit.fit_rscale,
    'general': linearfit.fit_general,
    'shift': linearfit.fit_shifts,
}
# Absolute tolerance for array comparisons, scaled by the sample size
# and the machine epsilon of the widest linalg dtype.
_ATOL = 10 * _LARGE_SAMPLE_SIZE * np.sqrt(
    np.finfo(linalg._MAX_LINALG_TYPE).eps
)
@pytest.fixture(scope="module")
def ideal_small_data(request):
    """Small random (uv, xy, wuv, wxy) sample with no imposed structure."""
    coords_uv = np.random.random((_SMALL_SAMPLE_SIZE, 2))
    coords_xy = np.random.random((_SMALL_SAMPLE_SIZE, 2))
    weights_uv = np.random.random(_SMALL_SAMPLE_SIZE)
    weights_xy = np.random.random(_SMALL_SAMPLE_SIZE)
    return coords_uv, coords_xy, weights_uv, weights_xy
@pytest.fixture(scope="function", params=[
    'shifts', 'rscale', 'rscale-flip-x', 'rscale-flip-y', 'affine'
])
def ideal_large_data(request):
    """Noise-free coordinate pairs related by a known linear transform."""
    uv = np.random.random((_LARGE_SAMPLE_SIZE, 2))
    # Spread the points over a simulated 4096x2048 image:
    uv[:, 0] *= 2047.0
    uv[:, 1] *= 4095.0
    theta = 360.0 * np.random.random()  # 0 ... 360
    kind = request.param
    if kind == 'shifts':
        angle = (0, 0)
        scale = (1, 1)
        proper = True
        transform = 'shift'
    elif kind == 'rscale':
        angle = (theta, theta)
        scale = 2 * (0.8 + 0.4 * np.random.random(), )  # 0.8 ... 1.2
        proper = True
        transform = 'rscale'
    elif kind == 'rscale-flip-x':
        angle = ((theta + 180.0) % 360.0, theta)
        scale = 2 * (0.8 + 0.4 * np.random.random(), )  # 0.8 ... 1.2
        proper = False
        transform = 'rscale'
    elif kind == 'rscale-flip-y':
        angle = (theta, (theta + 180.0) % 360.0)
        scale = 2 * (0.8 + 0.4 * np.random.random(), )  # 0.8 ... 1.2
        proper = False
        transform = 'rscale'
    else:  # 'affine'
        # Skew between the two rotation angles, with a random axis flip:
        offset = 150.0 * (np.random.random() - 0.5)  # -75 ... 75
        offset += 180.0 * np.random.choice([0.0, 1.0])
        angle = (theta, (theta + offset) % 360.0)
        scale = 0.8 + 0.4 * np.random.random(2)  # 0.8 ... 1.2
        rad = np.deg2rad(angle)
        proper = (np.prod(np.cos(rad)) + np.prod(np.sin(rad))) > 0
        transform = 'general'
    shift = 200.0 * (np.random.random(2) - 0.5)  # -100 ... +100
    rmat = linearfit.build_fit_matrix(angle, scale)
    skew = angle[1] - angle[0]
    # Apply the transform to generate the matched catalog:
    xy = np.dot(uv, rmat.T) + shift
    return uv, xy, angle, scale, shift, rmat, proper, skew, transform
@pytest.fixture(scope="function",
                params=[v for v in product(*(2 * [[False, True]]))])
def weight_data(request):
    """Weight vectors with a fraction of zero-weight ("bad") entries,
    plus the indices of those entries and the large offsets to inject
    there. Parametrized over (use_wxy, use_wuv) so all four weighting
    combinations are exercised.

    Returns ``(wxy, wuv, idx_xy, idx_uv, bd_xy, bd_uv)``.
    """
    nbd = int(_BAD_DATA_FRACTION * _LARGE_SAMPLE_SIZE)
    minv = 1000.0
    maxv = 1.0e6
    # NOTE: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` is the documented replacement.
    if not any(request.param):
        # No weights at all:
        wxy = None
        wuv = None
        idx_xy = (np.array([], dtype=int), )
        idx_uv = (np.array([], dtype=int), )
        bd_xy = np.zeros((0, 2))
        bd_uv = np.zeros((0, 2))
    elif all(request.param):
        # Weights on both catalogs; spread the bad points over both.
        wxy = np.random.random(_LARGE_SAMPLE_SIZE)
        wuv = np.random.random(_LARGE_SAMPLE_SIZE)
        # split indices into (almost) equal parts:
        nbdxy = int((0.3 + 0.4 * np.random.random()) * nbd)
        nbduv = nbd - nbdxy
        idx_xy = (np.random.choice(np.arange(_LARGE_SAMPLE_SIZE),
                                   nbdxy, replace=False), )
        idx_uv = (np.random.choice(np.arange(_LARGE_SAMPLE_SIZE),
                                   nbduv, replace=False), )
        wxy[idx_xy] = 0.0
        wuv[idx_uv] = 0.0
        sign = np.random.choice([-1, 1], (nbdxy, 2))
        bd_xy = sign * (minv + (maxv - minv) * np.random.random((nbdxy, 2)))
        sign = np.random.choice([-1, 1], (nbduv, 2))
        bd_uv = sign * (minv + (maxv - minv) * np.random.random((nbduv, 2)))
    elif request.param[0] and not request.param[1]:
        # Weights on xy only:
        idx = np.random.choice(np.arange(_LARGE_SAMPLE_SIZE),
                               nbd, replace=False)
        idx_xy = (idx, )
        idx_uv = (np.array([], dtype=int), )
        wxy = np.random.random(_LARGE_SAMPLE_SIZE)
        wxy[idx_xy] = 0.0
        wuv = None
        sign = np.random.choice([-1, 1], (nbd, 2))
        bd_xy = sign * (minv + (maxv - minv) * np.random.random((nbd, 2)))
        bd_uv = np.zeros((0, 2))
    else:
        # Weights on uv only:
        idx = np.random.choice(np.arange(_LARGE_SAMPLE_SIZE), nbd,
                               replace=False)
        idx_uv = (idx, )
        idx_xy = (np.array([], dtype=int), )
        wuv = np.random.random(_LARGE_SAMPLE_SIZE)
        wuv[idx_uv] = 0.0
        wxy = None
        sign = np.random.choice([-1, 1], (nbd, 2))
        bd_uv = sign * (minv + (maxv - minv) * np.random.random((nbd, 2)))
        bd_xy = np.zeros((0, 2))
    return wxy, wuv, idx_xy, idx_uv, bd_xy, bd_uv
@pytest.fixture(scope="module")
def tiny_zero_data():
    """Return an all-zero three-point tuple of (xy, uv, wxy, wuv)."""
    return (np.zeros((3, 2)), np.zeros((3, 2)),
            np.zeros(3), np.zeros(3))
@pytest.fixture(scope="function", params=[
    linearfit.fit_shifts, linearfit.fit_rscale, linearfit.fit_general
])
def fit_functions(request):
    """Parametrize a test over all three low-level fit functions."""
    return request.param
def test_build_fit_matrix_identity():
    """Zero rotation with unit (default) scale must give the identity."""
    expected = np.identity(2)
    # Scalar form, relying on the default scale of 1:
    assert np.allclose(expected, linearfit.build_fit_matrix(0),
                       rtol=0, atol=_ATOL)
    # Explicit tuple form:
    assert np.allclose(expected, linearfit.build_fit_matrix((0, 0), (1, 1)),
                       rtol=0, atol=_ATOL)
@pytest.mark.parametrize('rot', [1, 35, 75, 95, 155, 189, 261, 299, 358])
def test_build_fit_matrix_rot(rot):
    """A rotation composed with its inverse rotation is the identity."""
    forward = linearfit.build_fit_matrix(rot)
    backward = linearfit.build_fit_matrix(360 - rot)
    assert np.allclose(np.identity(2), np.dot(forward, backward),
                       rtol=0, atol=_ATOL)
@pytest.mark.parametrize('rot, scale', [
    ((1, 4), (2.4, 5.6)),
    ((31, 78), (0.9, 1.3)),
])
def test_build_fit_matrix_generalized(rot, scale):
    """Generalized matrix = rotation part composed with a scale part."""
    eye = np.identity(2)
    mat = linearfit.build_fit_matrix(rot, scale)
    # Column norms must recover the scales:
    assert np.allclose(np.sqrt(np.sum(mat**2, axis=0)), scale,
                       rtol=0, atol=_ATOL)
    scale_mat = np.diag(scale)
    # Rotation part composed with its transpose-inverse is det * I:
    rot_mat = linearfit.build_fit_matrix(rot, 1)
    rot_inv = linearfit.build_fit_matrix(rot[::-1], 1).T
    assert np.allclose(np.linalg.det(rot_mat) * eye,
                       np.dot(rot_mat, rot_inv), rtol=0, atol=_ATOL)
    assert np.allclose(mat, np.dot(rot_mat, scale_mat), rtol=0, atol=_ATOL)
@pytest.mark.parametrize('uv, xy, wuv, wxy', [
    (np.zeros(10), np.zeros(10), None, None),
    (np.zeros((10, 2, 2)), np.zeros(10), None, None),
    (np.zeros((10, 2)), np.zeros((11, 2)), None, None),
    3 * (np.zeros((10, 2)), ) + (None, ),
    2 * (np.zeros((10, 2)), ) + (None, np.zeros((10, 2))),
    2 * (np.zeros((10, 2)), ) + (None, np.zeros((5, 2))),
    2 * (np.zeros((10, 2)), ) + (np.zeros((5, 2)), None),
])
def test_iter_linear_fit_invalid_shapes(uv, xy, wuv, wxy):
    """Wrong dimensionality or mismatched lengths must raise ValueError."""
    with pytest.raises(ValueError):
        linearfit.iter_linear_fit(xy, uv, wxy=wxy, wuv=wuv)
@pytest.mark.parametrize('nclip, sigma', [
    (3, None), (-3, None), (3, -1), (-1, 3), (3, (1.0, 'invalid')),
])
def test_iter_linear_fit_invalid_sigma_nclip(ideal_small_data, nclip, sigma):
    """Negative or malformed sigma/nclip values must be rejected."""
    uv, xy = ideal_small_data[0], ideal_small_data[1]
    with pytest.raises(ValueError):
        linearfit.iter_linear_fit(xy, uv, nclip=nclip, sigma=sigma)
def test_iter_linear_fit_invalid_fitgeom(ideal_small_data):
    """An unknown fit geometry name must be rejected."""
    uv, xy = ideal_small_data[0], ideal_small_data[1]
    with pytest.raises(ValueError):
        linearfit.iter_linear_fit(xy, uv, fitgeom='invalid')
@pytest.mark.parametrize('nclip, sigma, clip_accum, weights, noise', [
    (None, 2, True, False, False),
    (None, 2, True, True, False),
    (2, 0.05, False, True, True),
])
def test_iter_linear_fit_special_cases(ideal_large_data, nclip, sigma,
                                       clip_accum, weights, noise):
    """Spot-check iter_linear_fit over a few clipping/weight setups."""
    uv, xy, _, _, shift, rmat, _, _, fitgeom = ideal_large_data
    if weights:
        wxy, wuv = 0.1 + 0.9 * np.random.random((2, xy.shape[0]))
    else:
        wxy = None
        wuv = None
    # With added noise, relax the tolerance accordingly:
    atol = _ATOL
    if noise:
        xy = xy + np.random.normal(0, 0.01, xy.shape)
        atol = 0.01
    fit = linearfit.iter_linear_fit(xy, uv, wxy, wuv, fitgeom=fitgeom,
                                    nclip=nclip, center=(0, 0), sigma=1,
                                    clip_accum=clip_accum)
    assert np.allclose(fit['shift'], shift, rtol=0, atol=atol)
    assert np.allclose(fit['matrix'], rmat, rtol=0, atol=atol)
@pytest.mark.parametrize('weights', [False, True])
def test_iter_linear_fit_1point(weights):
    """A single point constrains a pure shift exactly."""
    xy = np.array([[1.0, 2.0]])
    offsets = 20 * (np.random.random(2) - 0.5)
    if weights:
        wxy, wuv = 0.1 + 0.9 * np.random.random((2, xy.shape[0]))
    else:
        wxy = None
        wuv = None
    fit = linearfit.iter_linear_fit(xy, xy + offsets, wxy=wxy, wuv=wuv,
                                    fitgeom='shift', nclip=0)
    assert np.allclose(fit['shift'], -offsets, rtol=0, atol=_ATOL)
    assert np.allclose(fit['matrix'], np.identity(2), rtol=0, atol=_ATOL)
def test_iter_linear_fit_fitgeom_clip_all_data(ideal_large_data):
    """Clipping must stop before discarding (almost) all sources.

    With an absurdly small sigma every point would be clipped, so the
    iteration has to bail out once fewer than ``minobj`` points remain.
    """
    xy, uv, _, _, _, _, _, _, fitgeom = ideal_large_data
    npts = xy.shape[0]
    uv = uv + np.random.normal(0, 0.01, (npts, 2))
    wxy, wuv = 0.1 + 0.9 * np.random.random((2, npts))
    fit = linearfit.iter_linear_fit(
        xy, uv, wxy, wuv, fitgeom=fitgeom, sigma=1e-50, nclip=100
    )
    assert np.count_nonzero(fit['fitmask']) == npts
    assert fit['eff_nclip'] == 0
def test_compute_stat_invalid_weights(ideal_small_data):
    """All-zero weights must yield NaN statistics, not an exception."""
    pts = ideal_small_data[0]
    zero_weights = np.zeros(pts.shape[0])
    fit = {}
    linearfit._compute_stat(fit, pts, zero_weights)
    for stat in ('rmse', 'mae', 'std'):
        assert math.isnan(fit[stat])
@pytest.mark.parametrize('fit_function', [
    linearfit.fit_rscale, linearfit.fit_general,
])
def test_fit_detect_colinear_points(fit_function, tiny_zero_data):
    """Co-linear (here: coincident) points make the fit singular."""
    xy, uv = tiny_zero_data[0], tiny_zero_data[1]
    shifted = xy + [1, 2]
    with pytest.raises(linearfit.SingularMatrixError):
        fit_function(shifted, uv)
def test_fit_detect_zero_weights(fit_functions, tiny_zero_data):
    """A weight vector that is entirely zero must be rejected."""
    xy, uv, wxy, _ = tiny_zero_data
    with pytest.raises(ValueError):
        fit_functions(xy, uv, wxy=wxy)
def test_fit_detect_negative_weights(fit_functions, tiny_zero_data):
    """Negative (invalid) weights must raise ValueError."""
    xy, uv, wuv, _ = tiny_zero_data
    # Work on a copy: the fixture is module-scoped, so mutating `wuv`
    # in place would leak the -1 into every later test using it.
    # BUGFIX: the original called `wuv.copy()` but discarded the result,
    # then mutated the shared fixture array.
    wuv = wuv.copy()
    wuv[0] = -1
    # some weights are negative (=invalid):
    with pytest.raises(ValueError):
        fit_functions(xy, uv, wuv=wuv)
@pytest.mark.parametrize('fit_function, npts', [
    (linearfit.fit_shifts, 0),
    (linearfit.fit_rscale, 1),
    (linearfit.fit_general, 2),
])
def test_fit_general_too_few_points(fit_function, npts):
    """Each fit needs more points than the listed minimum-minus-one."""
    too_few = np.zeros((npts, 2))
    with pytest.raises(linearfit.NotEnoughPointsError):
        fit_function(too_few, np.zeros((npts, 2)))
@pytest.mark.parametrize(
'clip_accum, noise',
[v for v in product(*(2 * [[False, True]]))]
)
def test_iter_linear_fit_clip_style(ideal_large_data, weight_data,
clip_accum, noise):
""" Test clipping behavior. Test that weights exclude "bad" data. """
uv, xy, angle, scale, shift, rmat, proper, skew, fitgeom = ideal_large_data
wxy, wuv, idx_xy, idx_uv, bd_xy, bd_uv = weight_data
noise_sigma = 0.01
npts = xy.shape[0]
# add noise to data
if noise:
xy = xy + np.random.normal(0, noise_sigma, (npts, 2))
atol = 10 * noise_sigma
nclip = 3
else:
atol = _ATOL
nclip = 0
if wxy is not None:
xy[idx_xy] += bd_xy
if wuv is not None:
uv = uv.copy()
uv[idx_uv] += bd_uv
fit = linearfit.iter_linear_fit(
xy, uv, wxy=wxy, wuv=wuv, fitgeom=fitgeom, sigma=2,
clip_accum=clip_accum, nclip=nclip
)
shift_with_center = np.dot(rmat, fit['center']) - fit['center'] + shift
assert np.allclose(fit['shift'], shift_with_center, rtol=0, atol=atol)
assert np.allclose(fit['matrix'], rmat, rtol=0, atol=atol)
assert np.allclose(fit['rmse'], 0, rtol=0, atol=atol)
assert np.allclose(fit['mae'], 0, rtol=0, atol=atol)
assert np.allclose(fit['std'], 0, | |
# -*- coding: utf-8 -*-
from PyQt4 import QtCore, QtGui
try:
    # PyQt4 API v1 (Python 2 era) exposes QString.fromUtf8.
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # API v2: plain str is already unicode, so pass strings through.
    def _fromUtf8(s):
        return s

try:
    # Older PyQt4: translate() takes an explicit encoding argument.
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Newer PyQt4 dropped the encoding parameter.
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class SubsetTabFactory(object):
    """Builds the 'Subset' tab widget tree for the PyQt4 UI."""

    def __init__(self):
        # No up-front state; all widgets are created in CreateSubsetTab().
        pass
def CreateSubsetTab(self):
self.tab = QtGui.QWidget()
self.tab.setObjectName(_fromUtf8("tab"))
self.verticalLayout_8 = QtGui.QVBoxLayout(self.tab)
self.verticalLayout_8.setMargin(0)
self.verticalLayout_8.setObjectName(_fromUtf8("verticalLayout_8"))
self.gridLayout_9 = QtGui.QGridLayout()
self.gridLayout_9.setMargin(10)
self.gridLayout_9.setObjectName(_fromUtf8("gridLayout_9"))
self.line_17 = QtGui.QFrame(self.tab)
self.line_17.setFrameShape(QtGui.QFrame.VLine)
self.line_17.setFrameShadow(QtGui.QFrame.Sunken)
self.line_17.setObjectName(_fromUtf8("line_17"))
self.gridLayout_9.addWidget(self.line_17, 0, 1, 3, 1)
self.tabWidget_4 = QtGui.QTabWidget(self.tab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tabWidget_4.sizePolicy().hasHeightForWidth())
self.tabWidget_4.setSizePolicy(sizePolicy)
self.tabWidget_4.setMinimumSize(QtCore.QSize(750, 200))
self.tabWidget_4.setMaximumSize(QtCore.QSize(800, 16777215))
self.tabWidget_4.setObjectName(_fromUtf8("tabWidget_4"))
self.tab_9 = QtGui.QWidget()
self.tab_9.setObjectName(_fromUtf8("tab_9"))
self.horizontalLayout_8 = QtGui.QHBoxLayout(self.tab_9)
self.horizontalLayout_8.setMargin(0)
self.horizontalLayout_8.setObjectName(_fromUtf8("horizontalLayout_8"))
self.gridLayout_6 = QtGui.QGridLayout()
self.gridLayout_6.setMargin(5)
self.gridLayout_6.setObjectName(_fromUtf8("gridLayout_6"))
self.component_treewidget = QtGui.QTreeWidget(self.tab_9)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(15)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.component_treewidget.sizePolicy().hasHeightForWidth())
self.component_treewidget.setSizePolicy(sizePolicy)
self.component_treewidget.setMaximumSize(QtCore.QSize(250, 16777215))
self.component_treewidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.component_treewidget.setAlternatingRowColors(True)
self.component_treewidget.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
self.component_treewidget.setObjectName(_fromUtf8("component_treewidget"))
self.gridLayout_6.addWidget(self.component_treewidget, 0, 2, 1, 1)
self.result_treewidget = QtGui.QTreeWidget(self.tab_9)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(25)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.result_treewidget.sizePolicy().hasHeightForWidth())
self.result_treewidget.setSizePolicy(sizePolicy)
self.result_treewidget.setAlternatingRowColors(True)
self.result_treewidget.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
self.result_treewidget.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.result_treewidget.setHeaderHidden(False)
self.result_treewidget.setObjectName(_fromUtf8("result_treewidget"))
self.gridLayout_6.addWidget(self.result_treewidget, 0, 0, 1, 1)
self.residual_treewidget = QtGui.QTreeWidget(self.tab_9)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.residual_treewidget.sizePolicy().hasHeightForWidth())
self.residual_treewidget.setSizePolicy(sizePolicy)
self.residual_treewidget.setMaximumSize(QtCore.QSize(16777215, 75))
self.residual_treewidget.setAlternatingRowColors(True)
self.residual_treewidget.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
self.residual_treewidget.setObjectName(_fromUtf8("residual_treewidget"))
self.gridLayout_6.addWidget(self.residual_treewidget, 2, 0, 1, 3)
self.line_10 = QtGui.QFrame(self.tab_9)
self.line_10.setFrameShape(QtGui.QFrame.VLine)
self.line_10.setFrameShadow(QtGui.QFrame.Sunken)
self.line_10.setObjectName(_fromUtf8("line_10"))
self.gridLayout_6.addWidget(self.line_10, 0, 1, 1, 1)
self.line_11 = QtGui.QFrame(self.tab_9)
self.line_11.setFrameShape(QtGui.QFrame.HLine)
self.line_11.setFrameShadow(QtGui.QFrame.Sunken)
self.line_11.setObjectName(_fromUtf8("line_11"))
self.gridLayout_6.addWidget(self.line_11, 1, 0, 1, 3)
self.horizontalLayout_8.addLayout(self.gridLayout_6)
self.tabWidget_4.addTab(self.tab_9, _fromUtf8(""))
self.tab_10 = QtGui.QWidget()
self.tab_10.setObjectName(_fromUtf8("tab_10"))
self.horizontalLayout_9 = QtGui.QHBoxLayout(self.tab_10)
self.horizontalLayout_9.setMargin(0)
self.horizontalLayout_9.setObjectName(_fromUtf8("horizontalLayout_9"))
self.verticalLayout_5 = QtGui.QVBoxLayout()
self.verticalLayout_5.setMargin(5)
self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5"))
self.curvestat_treewidget = QtGui.QTreeWidget(self.tab_10)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.curvestat_treewidget.sizePolicy().hasHeightForWidth())
self.curvestat_treewidget.setSizePolicy(sizePolicy)
self.curvestat_treewidget.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
self.curvestat_treewidget.setIndentation(0)
self.curvestat_treewidget.setColumnCount(5)
self.curvestat_treewidget.setObjectName(_fromUtf8("curvestat_treewidget"))
self.verticalLayout_5.addWidget(self.curvestat_treewidget)
self.horizontalLayout_9.addLayout(self.verticalLayout_5)
self.tabWidget_4.addTab(self.tab_10, _fromUtf8(""))
self.gridLayout_9.addWidget(self.tabWidget_4, 2, 0, 1, 1)
self.verticalLayout_7 = QtGui.QVBoxLayout()
self.verticalLayout_7.setObjectName(_fromUtf8("verticalLayout_7"))
self.gridLayout_10 = QtGui.QGridLayout()
self.gridLayout_10.setObjectName(_fromUtf8("gridLayout_10"))
self.data_combobox = QtGui.QComboBox(self.tab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.data_combobox.sizePolicy().hasHeightForWidth())
self.data_combobox.setSizePolicy(sizePolicy)
self.data_combobox.setObjectName(_fromUtf8("data_combobox"))
self.gridLayout_10.addWidget(self.data_combobox, 1, 0, 2, 1)
self.popmain_btn = QtGui.QPushButton(self.tab)
self.popmain_btn.setMaximumSize(QtCore.QSize(100, 16777215))
self.popmain_btn.setObjectName(_fromUtf8("popmain_btn"))
self.gridLayout_10.addWidget(self.popmain_btn, 3, 2, 1, 1)
self.time_combobox = QtGui.QComboBox(self.tab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.time_combobox.sizePolicy().hasHeightForWidth())
self.time_combobox.setSizePolicy(sizePolicy)
self.time_combobox.setMinimumSize(QtCore.QSize(75, 0))
self.time_combobox.setObjectName(_fromUtf8("time_combobox"))
self.time_combobox.addItem(_fromUtf8(""))
self.time_combobox.addItem(_fromUtf8(""))
self.gridLayout_10.addWidget(self.time_combobox, 1, 1, 2, 2)
self.plot_btn = QtGui.QPushButton(self.tab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.plot_btn.sizePolicy().hasHeightForWidth())
self.plot_btn.setSizePolicy(sizePolicy)
self.plot_btn.setMaximumSize(QtCore.QSize(100, 16777215))
self.plot_btn.setObjectName(_fromUtf8("plot_btn"))
self.gridLayout_10.addWidget(self.plot_btn, 3, 1, 1, 1)
self.horizontalLayout_12 = QtGui.QHBoxLayout()
self.horizontalLayout_12.setObjectName(_fromUtf8("horizontalLayout_12"))
self.enablegrid_chk = QtGui.QCheckBox(self.tab)
self.enablegrid_chk.setMinimumSize(QtCore.QSize(0, 0))
self.enablegrid_chk.setMaximumSize(QtCore.QSize(125, 16777215))
self.enablegrid_chk.setObjectName(_fromUtf8("enablegrid_chk"))
self.horizontalLayout_12.addWidget(self.enablegrid_chk)
self.uselc_chk = QtGui.QCheckBox(self.tab)
self.uselc_chk.setMaximumSize(QtCore.QSize(125, 16777215))
self.uselc_chk.setChecked(True)
self.uselc_chk.setObjectName(_fromUtf8("uselc_chk"))
self.horizontalLayout_12.addWidget(self.uselc_chk)
self.autoupdate_chk = QtGui.QCheckBox(self.tab)
self.autoupdate_chk.setMaximumSize(QtCore.QSize(125, 16777215))
self.autoupdate_chk.setObjectName(_fromUtf8("autoupdate_chk"))
self.horizontalLayout_12.addWidget(self.autoupdate_chk)
self.gridLayout_10.addLayout(self.horizontalLayout_12, 3, 0, 1, 1)
self.verticalLayout_7.addLayout(self.gridLayout_10)
self.line_16 = QtGui.QFrame(self.tab)
self.line_16.setFrameShape(QtGui.QFrame.HLine)
self.line_16.setFrameShadow(QtGui.QFrame.Sunken)
self.line_16.setObjectName(_fromUtf8("line_16"))
self.verticalLayout_7.addWidget(self.line_16)
self.plotwidget = QtGui.QWidget(self.tab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.plotwidget.sizePolicy().hasHeightForWidth())
self.plotwidget.setSizePolicy(sizePolicy)
self.plotwidget.setMinimumSize(QtCore.QSize(300, 300))
self.plotwidget.setObjectName(_fromUtf8("plotwidget"))
self.verticalLayout_7.addWidget(self.plotwidget)
self.gridLayout_9.addLayout(self.verticalLayout_7, 0, 2, 3, 1)
self.line_18 = QtGui.QFrame(self.tab)
self.line_18.setFrameShape(QtGui.QFrame.HLine)
self.line_18.setFrameShadow(QtGui.QFrame.Sunken)
self.line_18.setObjectName(_fromUtf8("line_18"))
self.gridLayout_9.addWidget(self.line_18, 1, 0, 1, 1)
self.tabWidget = QtGui.QTabWidget(self.tab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tabWidget.sizePolicy().hasHeightForWidth())
self.tabWidget.setSizePolicy(sizePolicy)
self.tabWidget.setMaximumSize(QtCore.QSize(800, 200))
self.tabWidget.setTabPosition(QtGui.QTabWidget.West)
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.tab_11 = QtGui.QWidget()
self.tab_11.setObjectName(_fromUtf8("tab_11"))
self.verticalLayout_6 = QtGui.QVBoxLayout(self.tab_11)
self.verticalLayout_6.setMargin(0)
self.verticalLayout_6.setObjectName(_fromUtf8("verticalLayout_6"))
self.gridLayout_7 = QtGui.QGridLayout()
self.gridLayout_7.setMargin(10)
self.gridLayout_7.setHorizontalSpacing(2)
self.gridLayout_7.setObjectName(_fromUtf8("gridLayout_7"))
self.jd0_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.jd0_chk.setFont(font)
self.jd0_chk.setObjectName(_fromUtf8("jd0_chk"))
self.gridLayout_7.addWidget(self.jd0_chk, 0, 0, 1, 1)
self.s1tstart_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.s1tstart_chk.setFont(font)
self.s1tstart_chk.setObjectName(_fromUtf8("s1tstart_chk"))
self.gridLayout_7.addWidget(self.s1tstart_chk, 5, 4, 1, 1)
self.dperdt_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.dperdt_chk.setFont(font)
self.dperdt_chk.setObjectName(_fromUtf8("dperdt_chk"))
self.gridLayout_7.addWidget(self.dperdt_chk, 0, 4, 1, 1)
self.tc3b_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.tc3b_chk.setFont(font)
self.tc3b_chk.setObjectName(_fromUtf8("tc3b_chk"))
self.gridLayout_7.addWidget(self.tc3b_chk, 4, 4, 1, 1)
self.el3_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.el3_chk.setFont(font)
self.el3_chk.setToolTip(_fromUtf8(""))
self.el3_chk.setObjectName(_fromUtf8("el3_chk"))
self.gridLayout_7.addWidget(self.el3_chk, 4, 6, 1, 1)
self.x1_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.x1_chk.setFont(font)
self.x1_chk.setObjectName(_fromUtf8("x1_chk"))
self.gridLayout_7.addWidget(self.x1_chk, 3, 4, 1, 1)
self.s1long_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.s1long_chk.setFont(font)
self.s1long_chk.setObjectName(_fromUtf8("s1long_chk"))
self.gridLayout_7.addWidget(self.s1long_chk, 5, 1, 1, 1)
self.t1_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.t1_chk.setFont(font)
self.t1_chk.setObjectName(_fromUtf8("t1_chk"))
self.gridLayout_7.addWidget(self.t1_chk, 2, 0, 1, 1)
self.p0_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.p0_chk.setFont(font)
self.p0_chk.setObjectName(_fromUtf8("p0_chk"))
self.gridLayout_7.addWidget(self.p0_chk, 0, 1, 1, 1)
self.marqmul_spinbox = QtGui.QSpinBox(self.tab_11)
self.marqmul_spinbox.setMaximumSize(QtCore.QSize(16777215, 25))
self.marqmul_spinbox.setProperty("value", 5)
self.marqmul_spinbox.setObjectName(_fromUtf8("marqmul_spinbox"))
self.gridLayout_7.addWidget(self.marqmul_spinbox, 0, 8, 1, 1)
self.horizontalLayout_10 = QtGui.QHBoxLayout()
self.horizontalLayout_10.setObjectName(_fromUtf8("horizontalLayout_10"))
self.label_82 = QtGui.QLabel(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.label_82.setFont(font)
self.label_82.setObjectName(_fromUtf8("label_82"))
self.horizontalLayout_10.addWidget(self.label_82)
self.label_3 = QtGui.QLabel(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.label_3.setFont(font)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.horizontalLayout_10.addWidget(self.label_3)
self.gridLayout_7.addLayout(self.horizontalLayout_10, 0, 6, 1, 2)
self.line_13 = QtGui.QFrame(self.tab_11)
self.line_13.setFrameShape(QtGui.QFrame.HLine)
self.line_13.setFrameShadow(QtGui.QFrame.Sunken)
self.line_13.setObjectName(_fromUtf8("line_13"))
self.gridLayout_7.addWidget(self.line_13, 2, 6, 1, 3)
self.clearbaseset_btn = QtGui.QPushButton(self.tab_11)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.clearbaseset_btn.sizePolicy().hasHeightForWidth())
self.clearbaseset_btn.setSizePolicy(sizePolicy)
self.clearbaseset_btn.setObjectName(_fromUtf8("clearbaseset_btn"))
self.gridLayout_7.addWidget(self.clearbaseset_btn, 3, 8, 4, 1)
self.q_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.q_chk.setFont(font)
self.q_chk.setObjectName(_fromUtf8("q_chk"))
self.gridLayout_7.addWidget(self.q_chk, 1, 3, 1, 1)
self.p3b_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.p3b_chk.setFont(font)
self.p3b_chk.setObjectName(_fromUtf8("p3b_chk"))
self.gridLayout_7.addWidget(self.p3b_chk, 4, 1, 1, 1)
self.perr0_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.perr0_chk.setFont(font)
self.perr0_chk.setObjectName(_fromUtf8("perr0_chk"))
self.gridLayout_7.addWidget(self.perr0_chk, 0, 3, 1, 1)
self.xinc3b_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.xinc3b_chk.setFont(font)
self.xinc3b_chk.setObjectName(_fromUtf8("xinc3b_chk"))
self.gridLayout_7.addWidget(self.xinc3b_chk, 4, 2, 1, 1)
self.g2_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.g2_chk.setFont(font)
self.g2_chk.setObjectName(_fromUtf8("g2_chk"))
self.gridLayout_7.addWidget(self.g2_chk, 2, 3, 1, 1)
self.s1lat_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.s1lat_chk.setFont(font)
self.s1lat_chk.setObjectName(_fromUtf8("s1lat_chk"))
self.gridLayout_7.addWidget(self.s1lat_chk, 5, 0, 1, 1)
self.s2tend_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.s2tend_chk.setFont(font)
self.s2tend_chk.setObjectName(_fromUtf8("s2tend_chk"))
self.gridLayout_7.addWidget(self.s2tend_chk, 6, 7, 1, 1)
self.s1tend_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.s1tend_chk.setFont(font)
self.s1tend_chk.setObjectName(_fromUtf8("s1tend_chk"))
self.gridLayout_7.addWidget(self.s1tend_chk, 5, 7, 1, 1)
self.l2_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.l2_chk.setFont(font)
self.l2_chk.setObjectName(_fromUtf8("l2_chk"))
self.gridLayout_7.addWidget(self.l2_chk, 3, 3, 1, 1)
self.s1tmax2_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.s1tmax2_chk.setFont(font)
self.s1tmax2_chk.setObjectName(_fromUtf8("s1tmax2_chk"))
self.gridLayout_7.addWidget(self.s1tmax2_chk, 5, 6, 1, 1)
self.label_81 = QtGui.QLabel(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.label_81.setFont(font)
self.label_81.setObjectName(_fromUtf8("label_81"))
self.gridLayout_7.addWidget(self.label_81, 1, 6, 1, 2)
self.dpdt_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.dpdt_chk.setFont(font)
self.dpdt_chk.setObjectName(_fromUtf8("dpdt_chk"))
self.gridLayout_7.addWidget(self.dpdt_chk, 0, 2, 1, 1)
self.pot1_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.pot1_chk.setFont(font)
self.pot1_chk.setObjectName(_fromUtf8("pot1_chk"))
self.gridLayout_7.addWidget(self.pot1_chk, 3, 6, 1, 1)
self.desextinc_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.desextinc_chk.setFont(font)
self.desextinc_chk.setObjectName(_fromUtf8("desextinc_chk"))
self.gridLayout_7.addWidget(self.desextinc_chk, 4, 7, 1, 1)
self.g1_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.g1_chk.setFont(font)
self.g1_chk.setObjectName(_fromUtf8("g1_chk"))
self.gridLayout_7.addWidget(self.g1_chk, 2, 2, 1, 1)
self.l1_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.l1_chk.setFont(font)
self.l1_chk.setObjectName(_fromUtf8("l1_chk"))
self.gridLayout_7.addWidget(self.l1_chk, 3, 2, 1, 1)
self.e_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.e_chk.setFont(font)
self.e_chk.setObjectName(_fromUtf8("e_chk"))
self.gridLayout_7.addWidget(self.e_chk, 1, 1, 1, 1)
self.e3b_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.e3b_chk.setFont(font)
self.e3b_chk.setObjectName(_fromUtf8("e3b_chk"))
self.gridLayout_7.addWidget(self.e3b_chk, 4, 3, 1, 1)
self.a3b_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.a3b_chk.setFont(font)
self.a3b_chk.setObjectName(_fromUtf8("a3b_chk"))
self.gridLayout_7.addWidget(self.a3b_chk, 4, 0, 1, 1)
self.s2lat_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.s2lat_chk.setFont(font)
self.s2lat_chk.setObjectName(_fromUtf8("s2lat_chk"))
self.gridLayout_7.addWidget(self.s2lat_chk, 6, 0, 1, 1)
self.s1temp_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.s1temp_chk.setFont(font)
self.s1temp_chk.setObjectName(_fromUtf8("s1temp_chk"))
self.gridLayout_7.addWidget(self.s1temp_chk, 5, 3, 1, 1)
self.s2rad_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.s2rad_chk.setFont(font)
self.s2rad_chk.setObjectName(_fromUtf8("s2rad_chk"))
self.gridLayout_7.addWidget(self.s2rad_chk, 6, 2, 1, 1)
self.pot2_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.pot2_chk.setFont(font)
self.pot2_chk.setObjectName(_fromUtf8("pot2_chk"))
self.gridLayout_7.addWidget(self.pot2_chk, 3, 7, 1, 1)
self.alb1_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.alb1_chk.setFont(font)
self.alb1_chk.setObjectName(_fromUtf8("alb1_chk"))
self.gridLayout_7.addWidget(self.alb1_chk, 2, 4, 1, 1)
self.f2_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.f2_chk.setFont(font)
self.f2_chk.setObjectName(_fromUtf8("f2_chk"))
self.gridLayout_7.addWidget(self.f2_chk, 3, 1, 1, 1)
self.s1rad_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.s1rad_chk.setFont(font)
self.s1rad_chk.setObjectName(_fromUtf8("s1rad_chk"))
self.gridLayout_7.addWidget(self.s1rad_chk, 5, 2, 1, 1)
self.s2long_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.s2long_chk.setFont(font)
self.s2long_chk.setObjectName(_fromUtf8("s2long_chk"))
self.gridLayout_7.addWidget(self.s2long_chk, 6, 1, 1, 1)
self.a_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.a_chk.setFont(font)
self.a_chk.setObjectName(_fromUtf8("a_chk"))
self.gridLayout_7.addWidget(self.a_chk, 1, 0, 1, 1)
self.f1_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.f1_chk.setFont(font)
self.f1_chk.setObjectName(_fromUtf8("f1_chk"))
self.gridLayout_7.addWidget(self.f1_chk, 3, 0, 1, 1)
self.incl_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.incl_chk.setFont(font)
self.incl_chk.setObjectName(_fromUtf8("incl_chk"))
self.gridLayout_7.addWidget(self.incl_chk, 1, 2, 1, 1)
self.t2_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.t2_chk.setFont(font)
self.t2_chk.setObjectName(_fromUtf8("t2_chk"))
self.gridLayout_7.addWidget(self.t2_chk, 2, 1, 1, 1)
self.logd_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.logd_chk.setFont(font)
self.logd_chk.setObjectName(_fromUtf8("logd_chk"))
self.gridLayout_7.addWidget(self.logd_chk, 1, 4, 1, 1)
self.s2tstart_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.s2tstart_chk.setFont(font)
self.s2tstart_chk.setObjectName(_fromUtf8("s2tstart_chk"))
self.gridLayout_7.addWidget(self.s2tstart_chk, 6, 4, 1, 1)
self.s2tmax2_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.s2tmax2_chk.setFont(font)
self.s2tmax2_chk.setObjectName(_fromUtf8("s2tmax2_chk"))
self.gridLayout_7.addWidget(self.s2tmax2_chk, 6, 6, 1, 1)
self.s2temp_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.s2temp_chk.setFont(font)
self.s2temp_chk.setObjectName(_fromUtf8("s2temp_chk"))
self.gridLayout_7.addWidget(self.s2temp_chk, 6, 3, 1, 1)
self.vlr_spinbox = QtGui.QDoubleSpinBox(self.tab_11)
self.vlr_spinbox.setMaximumSize(QtCore.QSize(16777215, 25))
self.vlr_spinbox.setDecimals(3)
self.vlr_spinbox.setMaximum(1.0)
self.vlr_spinbox.setSingleStep(0.01)
self.vlr_spinbox.setProperty("value", 1.0)
self.vlr_spinbox.setObjectName(_fromUtf8("vlr_spinbox"))
self.gridLayout_7.addWidget(self.vlr_spinbox, 1, 8, 1, 1)
self.s2tmax1_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.s2tmax1_chk.setFont(font)
self.s2tmax1_chk.setObjectName(_fromUtf8("s2tmax1_chk"))
self.gridLayout_7.addWidget(self.s2tmax1_chk, 6, 5, 1, 1)
self.s1tmax1_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.s1tmax1_chk.setFont(font)
self.s1tmax1_chk.setObjectName(_fromUtf8("s1tmax1_chk"))
self.gridLayout_7.addWidget(self.s1tmax1_chk, 5, 5, 1, 1)
self.alb2_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.alb2_chk.setFont(font)
self.alb2_chk.setObjectName(_fromUtf8("alb2_chk"))
self.gridLayout_7.addWidget(self.alb2_chk, 2, 5, 1, 1)
self.perr3b_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.perr3b_chk.setFont(font)
self.perr3b_chk.setObjectName(_fromUtf8("perr3b_chk"))
self.gridLayout_7.addWidget(self.perr3b_chk, 4, 5, 1, 1)
self.pshift_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.pshift_chk.setFont(font)
self.pshift_chk.setObjectName(_fromUtf8("pshift_chk"))
self.gridLayout_7.addWidget(self.pshift_chk, 0, 5, 1, 1)
self.x2_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.x2_chk.setFont(font)
self.x2_chk.setObjectName(_fromUtf8("x2_chk"))
self.gridLayout_7.addWidget(self.x2_chk, 3, 5, 1, 1)
self.vgam_chk = QtGui.QCheckBox(self.tab_11)
font = QtGui.QFont()
font.setPointSize(10)
self.vgam_chk.setFont(font)
self.vgam_chk.setObjectName(_fromUtf8("vgam_chk"))
self.gridLayout_7.addWidget(self.vgam_chk, 1, 5, 1, 1)
self.verticalLayout_6.addLayout(self.gridLayout_7)
self.tabWidget.addTab(self.tab_11, _fromUtf8(""))
self.tab_12 = QtGui.QWidget()
self.tab_12.setObjectName(_fromUtf8("tab_12"))
self.horizontalLayout_11 = QtGui.QHBoxLayout(self.tab_12)
self.horizontalLayout_11.setMargin(0)
self.horizontalLayout_11.setObjectName(_fromUtf8("horizontalLayout_11"))
self.gridLayout_8 = QtGui.QGridLayout()
self.gridLayout_8.setMargin(10)
self.gridLayout_8.setObjectName(_fromUtf8("gridLayout_8"))
self.del_g1_ipt = QtGui.QLineEdit(self.tab_12)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.del_g1_ipt.sizePolicy().hasHeightForWidth())
self.del_g1_ipt.setSizePolicy(sizePolicy)
self.del_g1_ipt.setMaximumSize(QtCore.QSize(16777215, 30))
self.del_g1_ipt.setObjectName(_fromUtf8("del_g1_ipt"))
self.gridLayout_8.addWidget(self.del_g1_ipt, 5, 0, 1, 1)
self.del_g2_ipt = QtGui.QLineEdit(self.tab_12)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.del_g2_ipt.sizePolicy().hasHeightForWidth())
self.del_g2_ipt.setSizePolicy(sizePolicy)
self.del_g2_ipt.setMaximumSize(QtCore.QSize(16777215, 30))
self.del_g2_ipt.setObjectName(_fromUtf8("del_g2_ipt"))
self.gridLayout_8.addWidget(self.del_g2_ipt, 5, 1, 1, 1)
self.del_alb1_ipt = QtGui.QLineEdit(self.tab_12)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.del_alb1_ipt.sizePolicy().hasHeightForWidth())
self.del_alb1_ipt.setSizePolicy(sizePolicy)
self.del_alb1_ipt.setMaximumSize(QtCore.QSize(16777215, 30))
self.del_alb1_ipt.setObjectName(_fromUtf8("del_alb1_ipt"))
self.gridLayout_8.addWidget(self.del_alb1_ipt, 5, 4, 1, 1)
self.label_64 | |
ルーセント学院 ###
### 月光學院 ###
elif input_message in ['月光學院','ルーセント学院']:
value_i = [
['繪師: 菖蒲-pixiv', 'https://i.imgur.com/vvzNgXT.jpg'],
['繪師: S.U.-pixiv', 'https://i.imgur.com/2ayupbZ.jpg'],
['繪師: Itoichi-pixiv', 'https://i.imgur.com/AsV2SJ2.jpg'],
['繪師: ヤチモト-pixiv', 'https://i.imgur.com/NEuDpHQ.jpg'],
['繪師: 関西ジン-pixiv', 'https://i.imgur.com/OUqJ5YL.jpg'],
['繪師: やまだσ-pixiv', 'https://i.imgur.com/iGOKQtN.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['美咲','ミサキ','玉泉美咲','眼球法','萬聖美咲']:
value_i = [
['繪師: レオナート-pixiv', 'https://i.imgur.com/rDrtVAC.jpg'],
['繪師: うましお-pixiv', 'https://i.imgur.com/CRQH9Ek.jpg'],
['繪師: うまるつふり-pixiv', 'https://i.imgur.com/omIhOs8.jpg'],
['繪師: しもん-pixiv', 'https://i.imgur.com/kKBE1aO.jpg'],
['繪師: アイダ-pixiv', 'https://i.imgur.com/MU4Hykc.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['伊緒','イオ','支倉伊緒','魅魔','老師','伊歐派','泳裝伊緒']:
value_i = [
['繪師: 92M-pixiv', 'https://i.imgur.com/Ohkd2DO.jpg'],
['繪師: りりか-pixiv', 'https://i.imgur.com/lev3VPT.jpg'],
['繪師: ヤマブキイロ-pixiv', 'https://i.imgur.com/aWyGiYL.jpg'],
['繪師: ひとつのなか-pixiv', 'https://i.imgur.com/DHUDWbD.jpg'],
['繪師: みすコン-pixiv', 'https://i.imgur.com/zjAQsUn.jpg'],
['繪師: sonchi-pixiv', 'https://i.imgur.com/m3qNyco.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['鈴奈','スズナ','美波鈴奈','暴弓','爆弓','模特兒','數學白癡','泳裝鈴奈']:
value_i = [
['繪師: ひとつのなか-pixiv', 'https://i.imgur.com/EjcU0Im.jpg'],
['繪師: 結城辰也-pixiv', 'https://i.imgur.com/MkOrjea.jpg'],
['繪師: YH-pixiv', 'https://i.imgur.com/JrrgGN6.jpg'],
['繪師: 天雷-pixiv', 'https://i.imgur.com/e7OsQRB.jpg'],
['繪師: ダーゴ-pixiv', 'https://i.imgur.com/ILNauCi.jpg'],
['繪師: Rona-pixiv', 'https://i.imgur.com/fm0Pk7i.jpg'],
['繪師: 電解水-pixiv', 'https://i.imgur.com/Wykgysq.jpg'],
['繪師: フジフジ-pixiv', 'https://i.imgur.com/bXNXfuR.jpg'],
['繪師: PoLa-pixiv', 'https://i.imgur.com/hxJbcud.jpg'],
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
### 拉比林斯 ###
### ラビリンス ###
### 拉比林斯 ###
elif input_message in ['拉比林斯','ラビリンス']:
value_i = [
['繪師: みず-pixiv', 'https://i.imgur.com/F9SXxTp.jpg'],
['繪師: ユキタカ-pixiv', 'https://i.imgur.com/iQVOxk2.jpg'],
['繪師: みどりのちゃ-pixiv', 'https://i.imgur.com/2wbKiAy.jpg'],
['繪師: 秋月リア-pixiv', 'https://i.imgur.com/NRgmRRj.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['靜流','シズル','星野靜流','姐姐','姊姊','弟控','情人節靜流']:
value_i = [
['繪師: watchdog_rol-pixiv', 'https://i.imgur.com/Xu8PE9b.jpg'],
['繪師: まよ丼-pixiv', 'https://i.imgur.com/nF8bib2.jpg'],
['繪師: ナ²-pixiv', 'https://i.imgur.com/a8BLBak.jpg'],
['繪師: セーラ-pixiv', 'https://i.imgur.com/e9sXXu5.jpg'],
['繪師: 坊橋夜泊-pixiv', 'https://i.imgur.com/YklV0js.jpg'],
['繪師: ロアン-pixiv', 'https://i.imgur.com/hAK5NYP.jpg'],
['繪師: みず-pixiv', 'https://i.imgur.com/oU6wWfr.jpg'],
['繪師: ひとつのなか-pixiv', 'https://i.imgur.com/IlnJe8Z.jpg'],
['繪師: 千里凌酱-pixiv', 'https://i.imgur.com/qcKvMcr.jpg'],
['繪師: 千里凌酱-pixiv', 'https://i.imgur.com/1ANXoEU.jpg'],
['繪師: ddolggol-pixiv', 'https://i.imgur.com/kBRxhWE.jpg'],
['繪師: RYUKI-pixiv', 'https://i.imgur.com/Hzf2VCF.jpg'],
['繪師: Itoichi-pixiv', 'https://i.imgur.com/3hf7n4q.jpg'],
['繪師: horosuku-pixiv', 'https://i.imgur.com/aLWX0XE.png'],
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['妹弓','梨乃','璃乃','リノ','智障','笨蛋','衣之咲璃乃','奇幻璃乃']:
value_i = [
['https://i.imgur.com/1eLEkSN.jpg'],
['繪師: 真崎ケイ-pixiv', 'https://i.imgur.com/uKiWtdI.jpg'],
['繪師: Mauve-pixiv', 'https://i.imgur.com/3SBQq5o.jpg'],
['繪師: HIROKAZU-pixiv', 'https://i.imgur.com/BWXJYH8.jpg'],
['繪師: HIROKAZU-pixiv', 'https://i.imgur.com/OlNs5LG.jpg'],
['繪師: HIROKAZU-pixiv', 'https://i.imgur.com/lD2qFUi.jpg'],
['繪師: HIROKAZU-pixiv', 'https://i.imgur.com/qSiPpAc.jpg'],
['繪師: HIROKAZU-pixiv', 'https://i.imgur.com/hJitlbn.jpg'],
['繪師: みず-pixiv', 'https://i.imgur.com/ul5x7d4.jpg'],
['繪師: アイダ-pixiv', 'https://i.imgur.com/RTySuyH.jpg'],
['繪師: @hirokazutw-twitter', 'https://i.imgur.com/U2MBZb8.jpg'],
['繪師: @yantaro5446-twitter', 'https://i.imgur.com/eCSCr5x.jpg']
]
if(len(value_i[i% len(value_i)])==2):
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text= value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
else:
line_bot_api.reply_message(event.reply_token,ImageMessageURL(value_i[i% len(value_i)][0]))
### 森林守衛 ###
### フォレスティエ ###
### 森林守衛 ###
elif input_message in ['美里','愛川美里','ミサト','聖母','美里老師','水母','泳裝美里']:
value_i = [
['繪師: @monmon_shimon_-twitter', 'https://i.imgur.com/QsArrQW.jpg'],
['繪師: @Hello_pty-twitter', 'https://i.imgur.com/88X1SpO.jpg'],
['繪師: @shotenana-twitter', 'https://i.imgur.com/671lWeD.jpg'],
['繪師: @teffish-twitter', 'https://i.imgur.com/gyiQlHA.jpg'],
['繪師: @92M-twitter', 'https://i.imgur.com/SdgoaDF.jpg'],
['繪師: ヤマブキイロ-pixiv', 'https://i.imgur.com/zdgNpBt.jpg'],
['繪師: ぼたやん-pixiv', 'https://i.imgur.com/iK5bus4.jpg'],
['繪師: らんち-pixiv', 'https://i.imgur.com/PR3QIKs.jpg'],
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['碧','アオイ','雙葉碧','香菜弓'] or (input_message[:2] == '邊緣' and len(input_message) <= 4) :
value_i = [
['繪師: @kurororo_rororo-twitter', 'https://i.imgur.com/B9I4bm1.jpg'],
['繪師: ミチル-pixiv', 'https://i.imgur.com/FVpUqpf.jpg'],
['繪師: やま兎-pixiv', 'https://i.imgur.com/7B82lli.jpg'],
['繪師: すけsk-pixiv', 'https://i.imgur.com/Mmw25L7.jpg'],
['繪師: 秋ナス-pixiv', 'https://i.imgur.com/cUPv6eu.jpg'],
['繪師: 桜木ゆうき-pixiv', 'https://i.imgur.com/kiHg9WS.jpg'],
['繪師: 鳩家-pixiv', 'https://i.imgur.com/2J64V6T.jpg'],
['繪師: mare II-pixiv', 'https://i.imgur.com/jQ9NYWp.jpg'],
['繪師: @oriknp-twitter', 'https://i.imgur.com/Hgn9YeX.jpg'],
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['初音','ハツネ','柏崎初音','睡美人','泳裝初音']:
value_i = [
['繪師: ヤンタロウ-pixiv', 'https://i.imgur.com/QQ5alwd.jpg'],
['繪師: TYTS-pixiv', 'https://i.imgur.com/jTo5qH3.jpg'],
['繪師: 結城辰也-pixiv', 'https://i.imgur.com/UtkMYdI.jpg'],
['繪師: ゆりりん-pixiv', 'https://i.imgur.com/5E6XgR8.jpg'],
['繪師: ジャンク堂-pixiv', 'https://i.imgur.com/WTIywxi.jpg'],
['繪師: meel-pixiv', 'https://i.imgur.com/xN2lnOm.jpg'],
['繪師: 天雷-pixiv', 'https://i.imgur.com/F788xfj.jpg'],
['繪師: sonchi-pixiv', 'https://i.imgur.com/AXTx6rO.jpg'],
['繪師: ゆんみ-pixiv', 'https://i.imgur.com/2DJUfQU.jpg'],
['繪師: ひことう(彥灯)-pixiv', 'https://i.imgur.com/5JawGjF.jpg'],
['繪師: @men0105-twitter', 'https://i.imgur.com/MQxElt8.jpg'],
['繪師: @EN6cUMxx0rE6FFz-twitter', 'https://i.imgur.com/QxdVINo.jpg'],
['繪師: Misekiss-pixiv', 'https://i.imgur.com/t6cncRh.jpg'],
['繪師: @AraiGyuren-twitter', 'https://i.imgur.com/9oAUgeY.jpg'],
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
### 純白之翼 蘭德索爾分部 ###
### ヴァイスフリューゲル ランドソル支部 ###
### 純白之翼 ###
elif input_message in ['純白之翼','ヴァイスフリューゲル ランドソル支部','純白之翼 蘭德索爾分部','奇葩公會']:
value_i = [
['繪師: ぬるぷよ-pixiv', 'https://i.imgur.com/tio37LX.jpg'],
['繪師: なかひま-pixiv', 'https://i.imgur.com/hyxY4Hi.jpg'],
['繪師: うせつ(右折)-pixiv', 'https://i.imgur.com/DANzNSk.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['妮諾','ニノン','扇子','忍者','江戶妮諾']:
value_i = [
['繪師: たてじまうり-pixiv', 'https://i.imgur.com/e1CEWSd.jpg'],
['繪師: ぬるぷよ-pixiv', 'https://i.imgur.com/UMpxZQ7.jpg'],
['繪師: S.U.-pixiv', 'https://i.imgur.com/8YWxDvV.jpg'],
['繪師: phobishu-pixiv', 'https://i.imgur.com/1vWMYAr.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['莫妮卡','モニカ','毛二力','Monika','monika','魔法少女莫妮卡']:
value_i = [
['跳到just monika彩蛋'],
['繪師: まぉー。-pixiv', 'https://i.imgur.com/pHPN52u.jpg'],
['繪師: 浣狸-pixiv', 'https://i.imgur.com/IZgpNuR.jpg'],
['繪師: 水無月みず-pixiv', 'https://i.imgur.com/uqUbiik.jpg'],
['繪師: 紅薙ようと-pixiv', 'https://i.imgur.com/8XffJLz.jpg'],
['繪師: 引きニート-pixiv', 'https://i.imgur.com/duJmuoQ.jpg'],
['繪師: AJ-pixiv', 'https://i.imgur.com/idWxFcC.jpg'],
['繪師: さくじ-pixiv', 'https://i.imgur.com/ootpbPh.jpg'],
['繪師: まぉー。-pixiv', 'https://i.imgur.com/id3cEAo.jpg'],
]
try:
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
except:
value_i = [
['騎士...君......\nj̶̧̄u̸̬͌s̸̡̋t̴̬͘ ̴̣̀m̸̪͘ỏ̶̺n̵̙̕ȉ̷̢ǩ̷̜ã̷̠', '繪師: Yampa-pixiv', 'https://i.imgur.com/arYgHgh.jpg'],
['騎士...君......\nj̶̧̄u̸̬͌s̸̡̋t̴̬͘ ̴̣̀m̸̪͘ỏ̶̺n̵̙̕ȉ̷̢ǩ̷̜ã̷̠', '繪師: ヒシ馬-pixiv', 'https://i.imgur.com/QHIY62W.jpg'],
['騎士...君......\nj̶̧̄u̸̬͌s̸̡̋t̴̬͘ ̴̣̀m̸̪͘ỏ̶̺n̵̙̕ȉ̷̢ǩ̷̜ã̷̠', '繪師: 麦飴 アンプ-pixiv', 'https://i.imgur.com/v8Cu6fX.jpg'],
['騎士...君......\nj̶̧̄u̸̬͌s̸̡̋t̴̬͘ ̴̣̀m̸̪͘ỏ̶̺n̵̙̕ȉ̷̢ǩ̷̜ã̷̠', '繪師: Tsunゼイ-pixiv', 'https://i.imgur.com/GeiKkKn.jpg'],
['騎士...君......\nj̶̧̄u̸̬͌s̸̡̋t̴̬͘ ̴̣̀m̸̪͘ỏ̶̺n̵̙̕ȉ̷̢ǩ̷̜ã̷̠', '繪師: Sasoura-pixiv', 'https://i.imgur.com/B9FumFa.jpg'],
['騎士...君......\nj̶̧̄u̸̬͌s̸̡̋t̴̬͘ ̴̣̀m̸̪͘ỏ̶̺n̵̙̕ȉ̷̢ǩ̷̜ã̷̠', '繪師: Heaven’s Melody-pixiv', 'https://i.imgur.com/V7QaIkI.jpg'],
['騎士...君......\nj̶̧̄u̸̬͌s̸̡̋t̴̬͘ ̴̣̀m̸̪͘ỏ̶̺n̵̙̕ȉ̷̢ǩ̷̜ã̷̠', '繪師: 麦飴 アンプ-pixiv', 'https://i.imgur.com/j8JOeW8.jpg'],
['騎士...君......\nj̶̧̄u̸̬͌s̸̡̋t̴̬͘ ̴̣̀m̸̪͘ỏ̶̺n̵̙̕ȉ̷̢ǩ̷̜ã̷̠', '繪師: Satchel-pixiv', 'https://i.imgur.com/MB0QAZv.jpg'],
['騎士...君......\nj̶̧̄u̸̬͌s̸̡̋t̴̬͘ ̴̣̀m̸̪͘ỏ̶̺n̵̙̕ȉ̷̢ǩ̷̜ã̷̠', '繪師: HOmme-pixiv', 'https://i.imgur.com/Wop3hWH.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),TextSendMessage(text = value_i[i% len(value_i)][1]),ImageMessageURL(value_i[i% len(value_i)][2])])
elif input_message in ['空花','クウカ','抖M','抖m','江戶空花'] :
value_i = [
['繪師: ダーゴ-pixiv', 'https://i.imgur.com/JagI34h.jpg'],
['繪師: ジヤス-pixiv', 'https://i.imgur.com/J8pKPT0.jpg'],
['繪師: 桶乃かもく-pixiv', 'https://i.imgur.com/u5OAmLp.jpg'],
['繪師: たぐ-pixiv', 'https://i.imgur.com/2gUWFwE.jpg'],
['繪師: えぴ-pixiv', 'https://i.imgur.com/HkxfmUi.jpg'],
['繪師: S.U.-pixiv', 'https://i.imgur.com/VVWeLcP.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['步未','アユミ','石橋步未','路人妹' ,'路人','奇幻步未'] :
value_i = [
['繪師: あやみゆき-pixiv', 'https://i.imgur.com/wugza8u.jpg'],
['繪師: セランポーレ-pixiv', 'https://i.imgur.com/YgCOdxJ.jpg'],
['繪師: Acuma-pixiv', 'https://i.imgur.com/2L8K0D4.jpg'],
['繪師: 巧克力酱嗷-pixiv', 'https://i.imgur.com/HVaTlao.jpg'],
['繪師: セーリュー-pixiv', 'https://i.imgur.com/eAt5NmX.jpg'],
['繪師: スギユウ-pixiv', 'https://i.imgur.com/pf414Hl.jpg'],
['繪師: 関西ジン-pixiv', 'https://i.imgur.com/n33p8Nr.jpg'],
['繪師: ぐま-pixiv', 'https://i.imgur.com/cKwmATV.jpg'],
['繪師: スギユウ-pixiv', 'https://i.imgur.com/fG2T97o.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['雪','アユミ','小雪','虹村雪','雪哥','偽娘','女装大佬','自戀狂'] :
value_i = [
['繪師: ダーゴ-pixiv', 'https://i.imgur.com/5WVPTLL.jpg'],
['繪師: ねこちゃん-pixiv', 'https://i.imgur.com/6eQnJpA.jpg'],
['繪師: りこ-pixiv', 'https://i.imgur.com/qQi6c2M.jpg'],
['繪師: りこ-pixiv', 'https://i.imgur.com/aoMKsKA.jpg'],
['繪師: ASLE-pixiv', 'https://i.imgur.com/ptmTKlR.jpg'],
['繪師: みさき-pixiv', 'https://i.imgur.com/5FBxwBT.jpg'],
['繪師: ぐっち庵-pixiv', 'https://i.imgur.com/AeoEaDd.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
### 咲戀救護院 ###
### サレンディア救護院 ###
### 救護院 ###
elif input_message in ['咲戀救護院','サレンディア救護院','救護院']:
value_i = [
['繪師: S.U.-pixiv', 'https://i.imgur.com/7gMuqoy.jpg'],
['繪師: AJ-pixiv', 'https://i.imgur.com/tzQswOy.jpg'],
['繪師: ヤチモト-pixiv', 'https://i.imgur.com/BQpIStn.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['咲戀','咲戀媽媽','充電寶','泳媽','媽','サレン','泳裝咲戀','聖誕咲戀']:
value_i = [
['繪師: らんち-pixiv', 'https://i.imgur.com/JV5BTEz.jpg'],
['繪師: hemachi-pixiv', 'https://i.imgur.com/2teJ0AL.jpg'],
['繪師: SeeUmai-pixiv', 'https://i.imgur.com/8jiJdzM.jpg'],
['繪師: カケル-pixiv', 'https://i.imgur.com/LM8RSJw.jpg'],
['繪師: つかさ-pixiv', 'https://i.imgur.com/vvwxljH.jpg'],
['繪師: アリア-pixiv', 'https://i.imgur.com/HcHuwDl.jpg'],
['繪師: atychi-pixiv', 'https://i.imgur.com/z8WnFpy.jpg'],
['繪師: あんべよしろう-pixiv', 'https://i.imgur.com/3J0rt2k.jpg'],
['繪師: EpicLoot-pixiv', 'https://i.imgur.com/C7PEdmq.jpg'],
['繪師: ヒーロー-pixiv', 'https://i.imgur.com/HANfFFb.jpg'],
['繪師: ZN (あえん)-pixiv', 'https://i.imgur.com/MI7NZIS.jpg'],
['繪師: @MtxzBNBROukHQzl-twitter', 'https://i.imgur.com/CbGxQO3.jpg'],
['繪師: ZN (あえん)-pixiv', 'https://i.imgur.com/UFMHwZb.jpg'],
['繪師: むらさめしん-pixiv', 'https://i.imgur.com/j29GiaM.jpg'],
['繪師: むらさめしん-pixiv', 'https://i.imgur.com/AYLcF6I.jpg'],
['繪師: らんち-pixiv', 'https://i.imgur.com/C2Zkm3B.jpg'],
['繪師: らんち-pixiv', 'https://i.imgur.com/ihylo6y.jpg'],
['繪師: あむりた様2号-pixiv', 'https://i.imgur.com/pCqVbRJ.jpg'],
['繪師: @_mi_rei-twitter', 'https://i.imgur.com/tCMlVhe.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['鈴莓','スズメ','女僕','恐怖份子','天野鈴莓','正月鈴莓','泳裝鈴莓']:
value_i = [
['繪師: ダーゴ-pixiv', 'https://i.imgur.com/Mj7Vxxc.jpg'],
['繪師: ダーゴ-pixiv', 'https://i.imgur.com/YJMAbHJ.jpg'],
['繪師: ダーゴ-pixiv', 'https://i.imgur.com/QduwCSX.jpg'],
['繪師: ROIN-pixiv', 'https://i.imgur.com/k4weIQw.jpg'],
['繪師: りこ-pixiv', 'https://i.imgur.com/zvnXYcT.jpg'],
['繪師: Set-pixiv', 'https://i.imgur.com/z5wHpnK.jpg'],
['繪師: 天雷-pixiv', 'https://i.imgur.com/yx82sjg.jpg'],
['繪師: sonchi-pixiv', 'https://i.imgur.com/tAIXB6g.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['綾音','アヤネ','北條綾音','熊錘','噗吉','聖誕綾音']:
value_i = [
['繪師: 夢乃杜-pixiv', 'https://i.imgur.com/G4lAvYH.jpg'],
['繪師: うまるつふり-pixiv', 'https://i.imgur.com/T0IabEQ.jpg'],
['繪師: 世音-pixiv', 'https://i.imgur.com/vVMd7HJ.jpg'],
['繪師: けいらん-pixiv', 'https://i.imgur.com/vZI82po.jpg'],
['繪師: うまるつふり-pixiv', 'https://i.imgur.com/sA2s1hL.jpg'],
['繪師: 天雷-pixiv', 'https://i.imgur.com/q4WHogN.png'],
['繪師: ダーゴ-pixiv', 'https://i.imgur.com/Lz3mk9N.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['胡桃','クルミ','栗林胡桃','聖誕胡桃']:
value_i = [
['繪師: 関西ジン-pixiv', 'https://i.imgur.com/h5SinVW.jpg'],
['繪師: ミュー-pixiv', 'https://i.imgur.com/CtTm2kO.jpg'],
['繪師: AJ-pixiv', 'https://i.imgur.com/iZlOaV3.jpg'],
['繪師: えむ-pixiv', 'https://i.imgur.com/1mYlZ9n.jpg'],
['繪師: RYUKI-pixiv', 'https://i.imgur.com/G64Xivs.jpg'],
['繪師: ダーゴ-pixiv', 'https://i.imgur.com/YTTNWgL.jpg'],
['繪師: @gucchiponponpon-twitter', 'https://i.imgur.com/Uj0jVzB.jpg'],
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
### 墨丘利財團 ###
### メルクリウス財団 ###
### 財團 ###
elif input_message in ['墨丘利財團','メルクリウス財団','財團']:
value_i = [
['繪師: AJ-pixiv', 'https://i.imgur.com/S3Ld3So.jpg'],
['繪師: 夜凪朝妃-pixiv', 'https://i.imgur.com/ZsHkXJm.jpg'],
['繪師: ヤチモト-pixiv', 'https://i.imgur.com/Zo5tJYw.jpg'],
['繪師: HIROKAZU-pixiv', 'https://i.imgur.com/7VNue19.jpg'],
['繪師: 夜凪朝妃-pixiv', 'https://i.imgur.com/kVkppDM.jpg'],
['繪師: AJ-pixiv', 'https://i.imgur.com/VJEvtlM.jpg'],
['繪師: あかざてり-pixiv', 'https://i.imgur.com/LGWONXZ.jpg'],
['繪師: こうちゃ。-pixiv', 'https://i.imgur.com/1eKDiOg.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['秋乃','アキノ','聖誕秋乃','墨丘利財團唯一指定三星','財團之恥']:
value_i = [
['繪師: みずなし-pixiv', 'https://i.imgur.com/nLPrz2D.jpg'],
['繪師: ダーゴ-pixiv', 'https://i.imgur.com/8PEV511.jpg'],
['繪師: 真宮原ヒトシゲ-pixiv', 'https://i.imgur.com/5wbSJ5G.jpg'],
['繪師: ヒーロー-pixiv', 'https://i.imgur.com/0Ibk5HR.jpg'],
['繪師: 天雷-pixiv', 'https://i.imgur.com/Pp7pMVe.jpg'],
['繪師: sonchi-pixiv', 'https://i.imgur.com/b2w8CMp.jpg'],
['繪師: ぐっち庵-pixiv', 'https://i.imgur.com/PM02VxP.jpg'],
['繪師: @3gita219_-twitter', 'https://i.imgur.com/Cf8xbgM.jpg'],
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['優花梨','聖誕優花梨','ユカリ','酒鬼']:
value_i = [
['繪師: けんぴゃっ-pixiv', 'https://i.imgur.com/3grit6p.jpg'],
['繪師: 石川健太-pixiv', 'https://i.imgur.com/e28UBg8.jpg'],
['繪師: 天雷-pixiv', 'https://i.imgur.com/2ShceE9.jpg'],
['繪師: 鳩尾-pixiv', 'https://i.imgur.com/kFqvMMn.jpg'],
['繪師: 昌未-pixiv', 'https://i.imgur.com/Dv4rJgh.jpg'],
['繪師: りこ-pixiv', 'https://i.imgur.com/LQRJRp7.jpg'],
['繪師: 7010-pixiv', 'https://i.imgur.com/sU3Ceak.jpg'],
['繪師: sonchi-pixiv', 'https://i.imgur.com/5eHL47t.jpg'],
['繪師: まぉー。-pixiv', 'https://i.imgur.com/x4WaX1b.jpg'],
['繪師: ぐっち庵-pixiv', 'https://i.imgur.com/2n2O2q3.jpg'],
['繪師: PTD-pixiv', 'https://i.imgur.com/Hefwndh.jpg'],
['繪師: ミュー-pixiv', 'https://i.imgur.com/7bVx6fN.jpg'],
['繪師: @srm_chi-twitter', 'https://i.imgur.com/cqoR7cE.jpg'],
['繪師: @dosukoi_fresh-twitter', 'https://i.imgur.com/n4cY09W.jpg'],
['繪師: @Akao_kito-twitter', 'https://i.imgur.com/rlmKHjU.jpg'],
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['珠希','タマキ','宮坂珠希','貓劍','貓賊','泳裝珠希']:
value_i = [
['https://i.imgur.com/Y6Hubmx.jpg'],
['繪師: Donutman-pixiv', 'https://i.imgur.com/7fsnRcy.jpg'],
['繪師: トプ-pixiv', 'https://i.imgur.com/adKZbm0.jpg'],
['繪師: 水無月みず-pixiv', 'https://i.imgur.com/IodPy4h.jpg'],
['繪師: ROIN-pixiv', 'https://i.imgur.com/hgc0rr5.jpg'],
['繪師: あんず-pixiv', 'https://i.imgur.com/uPj8mrO.jpg'],
['繪師: ぐっち庵-pixiv', 'https://i.imgur.com/xKXragw.jpg'],
['繪師: ダーゴ-pixiv', 'https://i.imgur.com/llc5HX0.jpg']
]
if(len(value_i[i% len(value_i)])==2):
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text= value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
else:
line_bot_api.reply_message(event.reply_token,ImageMessageURL(value_i[i% len(value_i)][0]))
elif input_message in ['美冬','ユカリ','大神美冬','屠龍者','打工仔','泳裝美冬']:
value_i = [
['繪師: AJ-pixiv', 'https://i.imgur.com/YwHKCSK.jpg'],
['繪師: ぐっち庵-pixiv', 'https://i.imgur.com/t0AagIv.jpg'],
['繪師: プトン-pixiv', 'https://i.imgur.com/1I33uq2.jpg'],
['繪師: れつな-pixiv', 'https://i.imgur.com/TiCccLi.jpg'],
['繪師: あんず-pixiv', 'https://i.imgur.com/51bzHBf.jpg'],
['繪師: リブッチ-pixiv', 'https://i.imgur.com/2hCLlqE.jpg'],
['繪師: 水ようかん-pixiv', 'https://i.imgur.com/vOyBmZB.jpg'],
['繪師: ミュー-pixiv', 'https://i.imgur.com/aAxUlIQ.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message == '無人島':
value_i = [
['繪師: 161803393-pixiv', 'https://i.imgur.com/XYGSCyu.jpg'],
['繪師: AJ-pixiv', 'https://i.imgur.com/yxXnL0e.jpg'],
['繪師: あかざてり-pixiv', 'https://i.imgur.com/o2JUEOw.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
### 自衛團 ###
### カォン自警団 ###
### 哞哞自衛隊 ###
elif input_message in ['哞哞自衛隊','自衛隊','カォン自警団']:
value_i = [
['繪師: AJ-pixiv', 'https://i.imgur.com/i9BKQpj.jpg'],
['繪師: ぬるぷよ-pixiv', 'https://i.imgur.com/5BnCetn.jpg'],
['繪師: AJ-pixiv', 'https://i.imgur.com/EwCexQp.jpg'],
['繪師: WaterRing-pixiv', 'https://i.imgur.com/qQGcoBX.jpg'],
['繪師: MaJiang-pixiv', 'https://i.imgur.com/dArZxel.jpg'],
['繪師: konigstigerchan-pixiv', 'https://i.imgur.com/kkNl4dX.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['真步','マホ','姬宫真步','真步公主','公主病','泳裝真步']:
value_i = [
['繪師: S.U.-pixiv', 'https://i.imgur.com/yfgrop2.jpg'],
['繪師: ぺろんちょ-pixiv', 'https://i.imgur.com/npCDt3p.jpg'],
['繪師: 猫小渣-pixiv', 'https://i.imgur.com/SLSkhAO.jpg'],
['繪師: yamchu-pixiv', 'https://i.imgur.com/nbs4CXK.jpg'],
['繪師: JMao-pixiv', 'https://i.imgur.com/qo2wk18.jpg'],
['繪師: ダーゴ-pixiv', 'https://i.imgur.com/dn9kpoN.jpg'],
['繪師: 水無月みず-pixiv', 'https://i.imgur.com/iNKJY7T.jpg'],
['繪師: 傻蛋喵-pixiv', 'https://i.imgur.com/mp8YBnO.jpg'],
['繪師: 傻蛋喵-pixiv', 'https://i.imgur.com/5ttEoW0.jpg'],
['繪師: 7010-pixiv', 'https://i.imgur.com/7LNxWXT.jpg'],
['繪師: 凤鸢-pixiv', 'https://i.imgur.com/PCK4fdC.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['霞','カスミ','驢妹','偵探','水瀨祈','魔法少女霞','阿里巴巴大霞','阿里巴巴大俠']:
value_i = [
['繪師: AJ-pixiv', 'https://i.imgur.com/i9BKQpj.jpg'],
['繪師: aono-pixiv', 'https://i.imgur.com/vTNr4Ow.jpg'],
['RANK4霞,繪師: Mauve-pixiv', 'https://i.imgur.com/RY2NT5k.jpg'],
['RANK7霞,繪師: Mauve-pixiv', 'https://i.imgur.com/4rmJYo4.jpg'],
['RANK9霞,繪師: Mauve-pixiv', 'https://i.imgur.com/SNMRaLm.jpg'],
['RANK10霞,繪師: Mauve-pixiv', 'https://i.imgur.com/wu8SXNS.jpg'],
['繪師: みり-pixiv', 'https://i.imgur.com/MFPwEbM.jpg'],
['繪師: ゆりりん-pixiv', 'https://i.imgur.com/xNXb4pA.jpg'],
['繪師: あめ。-pixiv', 'https://i.imgur.com/mSfnH5W.jpg'],
['繪師: 骨カワ-pixiv', 'https://i.imgur.com/cn6i63j.jpg'],
['繪師: あやみゆき-pixiv', 'https://i.imgur.com/FQhW6Iw.jpg'],
['繪師: 紫桐シート-pixiv', 'https://i.imgur.com/4zQde23.jpg'],
['繪師: ド赤-pixiv', 'https://i.imgur.com/M57ENmC.jpg'],
['繪師: みり-pixiv', 'https://i.imgur.com/gcywpch.jpg'],
| |
<filename>autoimpute/imputations/mis_classifier.py
"""Module to predict missingness in data and generate imputation test cases.
This module contains the MissingnessClassifier, which is used to predict
missingness within a dataset using information derived from other features.
The MissingnessClassifier also generates test cases for imputation. Often,
we do not and will never have the true value of a missing data point,
so it's challenging to validate an imputation model's performance.
The MissingnessClassifier generates missing "test" samples from observed
data that have a high likelihood of being missing, which a user can then
"impute". This practice is useful to validate models that contain truly
missing data.
"""
import warnings
import numpy as np
import pandas as pd
from xgboost import XGBClassifier
from sklearn.base import clone, BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_is_fitted
from autoimpute.utils import check_nan_columns, check_predictors_fit
# pylint:disable=attribute-defined-outside-init
# pylint:disable=arguments-differ
# pylint:disable=too-many-arguments
# pylint:disable=too-many-instance-attributes
class MissingnessClassifier(BaseEstimator, ClassifierMixin):
"""Classify values as missing or not, based on missingness patterns.
    The class has numerous use cases. First, it fits columns of a DataFrame
and predicts whether or not an observation is missing, based on all
available information in other columns. The class supports both class
prediction and class probabilities.
Second, the class can generate test cases for imputation analysis. Test
cases are values that are truly observed but have a high probability of
being missing. These cases make imputation process supervised as opposed
to unsupervised. A user never knows the true value of missing data but can
verify imputation methods on test cases for which the true value is known.
"""
def __init__(self, classifier=None, predictors="all"):
"""Create an instance of the MissingnessClassifier.
The MissingnessClassifier inherits from sklearn BaseEstimator and
ClassifierMixin. This inheritence and this class' implementation
ensure that the MissingnessClassifier is a valid classifier that will
work in an sklearn pipeline.
Args:
classifier (classifier, optional): valid classifier from sklearn.
If None, default is xgboost. Note that classifier must
conform to sklearn style. This means it must implement the
`predict_proba` method and act as a porper classifier.
predictors (str, iter, dict, optiona): defaults to all, i.e.
use all predictors. If all, every column will be used for
every class prediction. If a list, subset of columns used for
all predictions. If a dict, specify which columns to use as
predictors for each imputation. Columns not specified in dict
will receive `all` by default.
"""
self.classifier = classifier
self.predictors = predictors
@property
def classifier(self):
"""Property getter to return the value of the classifier property"""
return self._classifier
@classifier.setter
def classifier(self, c):
"""Validate the classifier property and set default parameters.
Args:
c (classifier): if None, implement the xgboost classifier
Raises:
ValueError: classifier does not implement `predict_proba`
"""
if c is None:
self._classifier = XGBClassifier()
else:
m = "predict_proba"
if not hasattr(c, m):
raise ValueError(f"Classifier must implement {m} method.")
self._classifier = c
def _fit_strategy_validator(self, X):
"""Internal helper method to validate behavior appropriate for fit."""
# remove nan columns and store colnames
cols = X.columns.tolist()
self._preds = check_predictors_fit(self.predictors, cols)
# next, prep the categorical / numerical split
# only necessary for classes that use other features
# wont see this requirement in the single imputer
self.data_mi = X.isnull().astype(int)
def _predictor_strategy_validator(self, X):
"""Private method to prep for prediction."""
# initial checks before transformation
check_is_fitted(self, "statistics_")
# check dataset features are the same for both fit and transform
X_cols = X.columns.tolist()
mi_cols = self.data_mi.columns.tolist()
diff_X = set(X_cols).difference(mi_cols)
diff_mi = set(mi_cols).difference(X_cols)
if diff_X or diff_mi:
raise ValueError("Same columns must appear in fit and predict.")
@check_nan_columns
def fit(self, X, **kwargs):
"""Fit an individual classifier for each column in the DataFrame.
For each feature in the DataFrame, a classifier (default: xgboost) is
fit with the feature as the response (y) and all other features as
covariates (X). The resulting classifiers are stored in the class
instance statistics. One `fit` for each column in the dataset. Column
specification will be supported as well.
Args:
X (pd.DataFrame): DataFrame on which to fit classifiers
**kwargs: keyword arguments used by classifiers
Returns:
self: instance of MissingnessClassifier
"""
# start with fit checks
self._fit_strategy_validator(X)
self.statistics_ = {}
# iterate missingness fit using classifier and all remaining columns
for column in self.data_mi:
# only fit non time-based columns...
if not np.issubdtype(column, np.datetime64):
y = self.data_mi[column]
preds = self._preds[column]
if preds == "all":
x = X.drop(column, axis=1)
else:
x = X[preds]
clf = clone(self.classifier)
cls_fit = clf.fit(x.values, y.values, **kwargs)
self.statistics_[column] = cls_fit
return self
@check_nan_columns
def predict(self, X, **kwargs):
"""Predict class of each feature. 1 for missing; 0 for not missing.
First checks to ensure data has been fit. If fit, `predict` method
uses the respective classifier of each feature (stored in statistics)
and predicts class membership for each observation of each feature.
1 = missing; 0 = not missing. Prediction is binary, as class membership
is hard. If probability deesired, use `predict_proba` method.
Args:
X (pd.DataFrame): DataFrame used to create predictions.
kwargs: kewword arguments. Used by the classifer.
Returns:
pd.DataFrame: DataFrame with class prediction for each observation.
"""
# predictions for each column using respective fit classifier
self._predictor_strategy_validator(X)
preds_mat = []
for column in self.data_mi:
if not np.issubdtype(column, np.datetime64):
preds = self._preds[column]
if preds == "all":
x = X.drop(column, axis=1)
else:
x = X[preds]
cls_fit = self.statistics_[column]
y_pred = cls_fit.predict(x.values, **kwargs)
preds_mat.append(y_pred)
else:
y_pred = np.zeros(len(self.data_mi.index))
preds_mat.append(y_pred)
# store the predictor matrix class membership as a dataframe
preds_mat = np.array(preds_mat).T
pred_cols = [f"{cl}_pred" for cl in X.columns]
self.data_mi_preds = pd.DataFrame(preds_mat, columns=pred_cols)
return self.data_mi_preds
@check_nan_columns
def predict_proba(self, X, **kwargs):
"""Predict probability of missing class membership of each feature.
First checks to ensure data has been fit. If fit, `predict_proba`
method uses the respsective classifier of each feature (in statistics)
and predicts probability of missing class membership for each
observation of each feature. Prediction is probability of missing.
Therefore, probability of not missing is 1-P(missing). For hard class
membership prediction, use `predict`.
Args:
X (pd.DataFrame): DataFrame used to create probabilities.
Returns:
pd.DataFrame: DataFrame with probability of missing class for
each observation.
"""
self._predictor_strategy_validator(X)
preds_mat = []
for column in self.data_mi:
if not np.issubdtype(column, np.datetime64):
preds = self._preds[column]
if preds == "all":
x = X.drop(column, axis=1)
else:
x = X[preds]
cls_fit = self.statistics_[column]
y_pred = cls_fit.predict_proba(x.values, **kwargs)[:, 1]
preds_mat.append(y_pred)
else:
y_pred = np.zeros(len(self.data_mi.index))
preds_mat.append(y_pred)
# store the predictor matrix probabilities as a dataframe
preds_mat = np.array(preds_mat).T
pred_cols = [f"{cl}_pred" for cl in X.columns]
self.data_mi_proba = pd.DataFrame(preds_mat, columns=pred_cols)
return self.data_mi_proba
def fit_predict(self, X):
"""Convenience method for fit and class prediction.
Args:
X (pd.DataFrame): DataFrame to fit classifier and predict class.
Returns:
pd.DataFrame: DataFrame of class predictions.
"""
return self.fit(X).predict(X)
def fit_predict_proba(self, X):
"""Convenience method for fit and class probability prediction.
Args:
X (pd.DataFrame): DataFrame to fit classifier and prredict prob.
Returns:
pd.DataFrame: DataFrame of class probability predictions.
"""
return self.fit(X).predict_proba(X)
@check_nan_columns
def gen_test_indices(self, X, thresh=0.5, use_exist=False):
"""Generate indices of false positives for each fitted column.
Method generates the locations (indices) of false positives returned
from classifiers. These are instances that have a high probability of
being missing even though true value is observed. Use this method to
get indices without mutating the actual DataFrame. To set the values
to missing for the actual DataFrame, use `gen_test_df`.
Args:
X (pd.DataFrame): DataFrame from which test indices generated.
Data first goes through `fit_predict_proba`.
thresh (float, optional): Threshhold for generating false positive.
If raw value is observed and P(missing) >= thresh, then the
observation is considered a false positive and index is stored.
use_exist (bool, optional): Whether or not to use existing fit and
classifiers. Default is False.
Returns:
self: test_indice available from `self.test_indices`
"""
# always fit_transform with dataset, as test vals can change
self.test_indices = {}
if not use_exist:
self.fit_predict_proba(X)
# loop through missing data indicators, eval new set for missing
for c in self.data_mi:
mi_c = self.data_mi[c]
not_mi = mi_c[mi_c == 0].index
pred_not_mi = self.data_mi_proba.loc[not_mi, f"{c}_pred"]
pred_wrong = pred_not_mi[pred_not_mi > thresh].index
self.test_indices[c] = pred_wrong
return self
def gen_test_df(self, X, thresh=0.5, m=0.05,
inplace=False, use_exist=False):
"""Generate new DatFrame with value of false positives set to missing.
Method | |
of "de"
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
return self.cubefile_endpoint.call_with_http_info(**kwargs)
def cubes(self, **kwargs):
"""cubes # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.cubes(async_req=True)
>>> result = thread.get()
Keyword Args:
username (str): [optional] if omitted the server will use the default value of "GAST"
password (str): [optional] if omitted the server will use the default value of "GAST"
selection (str): [optional]
area (str): [optional] if omitted the server will use the default value of "free"
pagelength (str): [optional] if omitted the server will use the default value of "100"
language (str): [optional] if omitted the server will use the default value of "de"
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
return self.cubes_endpoint.call_with_http_info(**kwargs)
def cubes2statistic(self, **kwargs):
"""cubes2statistic # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.cubes2statistic(async_req=True)
>>> result = thread.get()
Keyword Args:
username (str): [optional] if omitted the server will use the default value of "GAST"
password (str): [optional] if omitted the server will use the default value of "GAST"
name (str): [optional]
selection (str): [optional]
area (str): [optional] if omitted the server will use the default value of "free"
pagelength (str): [optional] if omitted the server will use the default value of "100"
language (str): [optional] if omitted the server will use the default value of "de"
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
return self.cubes2statistic_endpoint.call_with_http_info(**kwargs)
    def cubes2variable(self, **kwargs):
        """cubes2variable  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.cubes2variable(async_req=True)
        >>> result = thread.get()

        Keyword Args:
            username (str): [optional] if omitted the server will use the default value of "GAST"
            password (str): [optional] if omitted the server will use the default value of "GAST"
            name (str): [optional]
            selection (str): [optional]
            area (str): [optional] if omitted the server will use the default value of "free"
            pagelength (str): [optional] if omitted the server will use the default value of "100"
            language (str): [optional] if omitted the server will use the default value of "de"
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                one number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
            _host_index (int/None): specifies the index of the server
                that we want to use.
                Default is read from the configuration.
            async_req (bool): execute request asynchronously

        Returns:
            None
            If the method is called asynchronously, returns the request
            thread.
        """
        # Fill in framework defaults without overriding caller-supplied values.
        kwargs["async_req"] = kwargs.get("async_req", False)
        kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
        kwargs["_preload_content"] = kwargs.get("_preload_content", True)
        kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
        kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
        kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
        kwargs["_host_index"] = kwargs.get("_host_index")
        return self.cubes2variable_endpoint.call_with_http_info(**kwargs)
def find(self, **kwargs):
"""find # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.find(async_req=True)
>>> result = thread.get()
Keyword Args:
username (str): [optional] if omitted the server will use the default value of "GAST"
password (str): [optional] if omitted the server will use the default value of "GAST"
term (str): [optional]
category (str): [optional] if omitted the server will use the default value of "all"
pagelength (str): [optional] if omitted the server will use the default value of "100"
language (str): [optional] if omitted the server will use the default value of "de"
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
return self.find_endpoint.call_with_http_info(**kwargs)
def jobs(self, **kwargs):
"""jobs # noqa: E501
This method makes a synchronous HTTP request by | |
<filename>spinup/algos/sac1_rnn/sac1_rnn.py
import numpy as np
import tensorflow as tf
from numbers import Number
import gym
import time
from spinup.algos.sac1_rnn import core
from spinup.algos.sac1_rnn.core import get_vars
from spinup.utils.logx import EpochLogger
from gym.spaces import Box, Discrete
from spinup.utils.frame_stack import FrameStack
from collections import deque
# config = tf.ConfigProto()
# config.gpu_options.allow_growth = True
# session = tf.Session(config=config)
import os
class ReplayBuffer:
    """FIFO experience replay buffer for SAC agents.

    Transitions are kept in fixed-size numpy arrays; once the buffer is
    full, the oldest entries are overwritten (ring-buffer semantics).
    """

    def __init__(self, obs_dim, act_dim, size):
        self.obs1_buf = np.zeros((size, obs_dim), dtype=np.float32)
        self.obs2_buf = np.zeros((size, obs_dim), dtype=np.float32)
        self.acts_buf = np.zeros((size, act_dim), dtype=np.float32)
        self.rews_buf = np.zeros(size, dtype=np.float32)
        self.done_buf = np.zeros(size, dtype=np.float32)
        self.ptr = 0
        self.size = 0
        self.max_size = size

    def store(self, obs, act, rew, next_obs, done):
        """Insert one transition, overwriting the oldest slot when full."""
        slot = self.ptr
        self.obs1_buf[slot] = obs
        self.obs2_buf[slot] = next_obs
        self.acts_buf[slot] = act
        self.rews_buf[slot] = rew
        self.done_buf[slot] = done
        self.ptr = (slot + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)

    def sample_batch(self, batch_size=32):
        """Return a dict of ``batch_size`` uniformly sampled transitions."""
        idxs = np.random.randint(0, self.size, size=batch_size)
        return {
            'obs1': self.obs1_buf[idxs],
            'obs2': self.obs2_buf[idxs],
            'acts': self.acts_buf[idxs],
            'rews': self.rews_buf[idxs],
            'done': self.done_buf[idxs],
        }
class ReplayBuffer_RNN:
    """
    A simple FIFO experience replay buffer for SAC_RNN agents.

    Each entry is a whole sub-trajectory of length ``Lb + Lt`` (``Lb``
    burn-in steps plus ``Lt`` training steps, matching how ``sac1_rnn``
    slices its placeholders) together with the RNN hidden state captured at
    the start of the sequence.
    """
    def __init__(self, Lb, Lt, hc_dim, obs_dim, act_dim, size):
        # observations carry one extra step: the next-observation of the
        # final transition in the sequence
        self.buffer_obs = np.zeros([size, Lb + Lt + 1, obs_dim], dtype=np.float32)
        # RNN hidden/cell state at the start of each stored sequence
        self.buffer_hc = np.zeros([size, hc_dim], dtype=np.float32)
        self.buffer_a = np.zeros([size, Lb + Lt, act_dim], dtype=np.float32)
        self.buffer_r = np.zeros([size, Lb + Lt], dtype=np.float32)
        self.buffer_d = np.zeros([size, Lb + Lt], dtype=np.float32)
        # per-step 0/1 validity flags; used downstream as a loss mask
        self.buffer_data01 = np.zeros([size, Lb + Lt], dtype=np.float32)
        # ring-buffer bookkeeping: next slot, current fill, capacity
        self.ptr, self.size, self.max_size = 0, 0, size
    def store(self, obs_hc_queue, a_r_d_data01_queue):
        # Split the per-step (obs, hc) pairs into an obs stream and an hc
        # stream.  NOTE(review): assumes each queue element is an (obs, hc)
        # pair stackable along axis 1 — confirm against the caller.
        obs, hc = np.stack(obs_hc_queue, axis=1)
        self.buffer_obs[self.ptr] = np.array(list(obs), dtype=np.float32)
        # only the hidden state at the first step of the sequence is kept
        self.buffer_hc[self.ptr] = np.array(list(hc), dtype=np.float32)[0]
        # likewise split per-step (action, reward, done, valid) tuples
        a, r, d, data01 = np.stack(a_r_d_data01_queue, axis=1)
        self.buffer_a[self.ptr] = np.array(list(a), dtype=np.float32)
        self.buffer_r[self.ptr] = np.array(list(r), dtype=np.float32)
        self.buffer_d[self.ptr] = np.array(list(d), dtype=np.float32)
        self.buffer_data01[self.ptr] = np.array(list(data01), dtype=np.float32)
        # advance the ring pointer, saturating the fill count at capacity
        self.ptr = (self.ptr + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)
    def sample_batch(self, batch_size=32):
        # uniform sampling (with replacement) over the filled portion
        idxs = np.random.randint(0, self.size, size=batch_size)
        return dict(obs=self.buffer_obs[idxs],
                    hc=self.buffer_hc[idxs],
                    acts=self.buffer_a[idxs],
                    rews=self.buffer_r[idxs],
                    done=self.buffer_d[idxs],
                    data01=self.buffer_data01[idxs], )
"""
Soft Actor-Critic
(With slight variations that bring it closer to TD3)
Lt >= Lb > 0 !!!
"""
def sac1_rnn(args, env_fn, actor_critic=core.mlp_actor_critic, sac1_dynamic_rnn=core.sac1_dynamic_rnn,
ac_kwargs=dict(), seed=0, Lb=10, Lt=10, hc_dim=128, steps_per_epoch=3000, epochs=100,
replay_size=int(5e5), gamma=0.99, reward_scale=1.0, polyak=0.995, lr=5e-4, alpha=0.2,
h0=1.0, batch_size=150, start_steps=10000, max_ep_len_train=1000, max_ep_len_test=1000,
logger_kwargs=dict(), save_freq=1):
"""
Args:
env_fn : A function which creates a copy of the environment.
The environment must satisfy the OpenAI Gym API.
actor_critic: A function which takes in placeholder symbols
for state, ``x_ph``, and action, ``a_ph``, and returns the main
outputs from the agent's Tensorflow computation graph:
=========== ================ ======================================
Symbol Shape Description
=========== ================ ======================================
``mu`` (batch, act_dim) | Computes mean actions from policy
| given states.
``pi`` (batch, act_dim) | Samples actions from policy given
| states.
``logp_pi`` (batch,) | Gives log probability, according to
| the policy, of the action sampled by
| ``pi``. Critical: must be differentiable
| with respect to policy parameters all
| the way through action sampling.
``q1`` (batch,) | Gives one estimate of Q* for
| states in ``x_ph`` and actions in
| ``a_ph``.
``q2`` (batch,) | Gives another estimate of Q* for
| states in ``x_ph`` and actions in
| ``a_ph``.
``q1_pi`` (batch,) | Gives the composition of ``q1`` and
| ``pi`` for states in ``x_ph``:
| q1(x, pi(x)).
``q2_pi`` (batch,) | Gives the composition of ``q2`` and
| ``pi`` for states in ``x_ph``:
| q2(x, pi(x)).
=========== ================ ======================================
ac_kwargs (dict): Any kwargs appropriate for the actor_critic
function you provided to SAC.
seed (int): Seed for random number generators.
steps_per_epoch (int): Number of steps of interaction (state-action pairs)
for the agent and the environment in each epoch.
epochs (int): Number of epochs to run and train agent.
replay_size (int): Maximum length of replay buffer.
gamma (float): Discount factor. (Always between 0 and 1.)
polyak (float): Interpolation factor in polyak averaging for target
networks. Target networks are updated towards main networks
according to:
.. math:: \\theta_{\\text{targ}} \\leftarrow
\\rho \\theta_{\\text{targ}} + (1-\\rho) \\theta
where :math:`\\rho` is polyak. (Always between 0 and 1, usually
close to 1.)
lr (float): Learning rate (used for policy/value/alpha learning).
alpha (float/'auto'): Entropy regularization coefficient. (Equivalent to
inverse of reward scale in the original SAC paper.) / 'auto': alpha is automated.
batch_size (int): Minibatch size for SGD.
start_steps (int): Number of steps for uniform-random action selection,
before running real policy. Helps exploration.
max_ep_len (int): Maximum length of trajectory / episode / rollout.
logger_kwargs (dict): Keyword args for EpochLogger.
save_freq (int): How often (in terms of gap between epochs) to save
the current policy and value function.
"""
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
tf.set_random_seed(seed)
np.random.seed(seed)
env, test_env = env_fn('train'), env_fn('test')
obs_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]
# Action limit for clamping: critically, assumes all dimensions share the same bound!
act_limit = env.action_space.high[0]
# Share information about action space with policy architecture
ac_kwargs['action_space'] = env.action_space
######################################
# Inputs to computation graph
# x_ph, a_ph, x2_ph, r_ph, d_ph = core.placeholders(obs_dim, act_dim, obs_dim, None, None)
#
# # Main outputs from computation graph
# with tf.variable_scope('main'):
# mu, pi, logp_pi, q1, q2, q1_pi, q2_pi = actor_critic(x_ph, a_ph, **ac_kwargs)
#
# # Target value network
# with tf.variable_scope('target'):
# _, _, logp_pi_, _, _, q1_pi_, q2_pi_ = actor_critic(x2_ph, a_ph, **ac_kwargs)
#
######################################
obs_ph, hc_ph = core.placeholders((Lb + Lt + 1, obs_dim), (hc_dim,))
a_ph_all, r_ph_all, d_ph_all, data01_ph = core.placeholders((Lb + Lt, act_dim), (Lb + Lt,), (Lb + Lt,), (Lb + Lt,))
obs_burn = obs_ph[:, :Lb]
obs_train = obs_ph[:, Lb:]
obs12_train = data01_ph[:, Lb:]
# obs12_train = tf.transpose(obs12_train, perm=[1, 0])
a_ph = a_ph_all[:, Lb:]
r_ph = r_ph_all[:, Lb:]
d_ph = d_ph_all[:, Lb:]
_, state_burn_in = sac1_dynamic_rnn(obs_burn, hc_ph)
state_burn_in = tf.stop_gradient(state_burn_in) * data01_ph[:, 0][..., tf.newaxis]
s_outputs, _ = sac1_dynamic_rnn(obs_train, state_burn_in)
s_ph = s_outputs[:, :-1]
s2_ph = s_outputs[:, 1:]
logp_pi, logp_pi2, q1, q2, q1_pi, q2_pi = [None, ] * Lt, [None, ] * Lt, [None, ] * Lt, \
[None, ] * Lt, [None, ] * Lt, [None, ] * Lt
logp_pi_, q1_pi_, q2_pi_ = [None, ] * Lt, [None, ] * Lt, [None, ] * Lt
for i in range(Lt):
# Main outputs from computation graph
with tf.variable_scope('main', reuse=tf.AUTO_REUSE):
######################################
_, _, logp_pi[i], logp_pi2[i], q1[i], q2[i], q1_pi[i], q2_pi[i] = actor_critic(s_ph[:, i],
s2_ph[:, i],
a_ph[:, i],
**ac_kwargs)
# Target value network
with tf.variable_scope('target', reuse=tf.AUTO_REUSE):
_, _, logp_pi_[i], _, _, _, q1_pi_[i], q2_pi_[i] = actor_critic(s2_ph[:, i], s2_ph[:, i], a_ph[:, i],
**ac_kwargs)
logp_pi, logp_pi2, q1, q2, q1_pi, q2_pi = tf.stack(logp_pi, axis=1), tf.stack(logp_pi2, axis=1), \
tf.stack(q1, axis=1), tf.stack(q2, axis=1), tf.stack(q1_pi, axis=1), tf.stack(q2_pi, axis=1)
logp_pi_, q1_pi_, q2_pi_ = tf.stack(logp_pi_, axis=1), tf.stack(q1_pi_, axis=1), tf.stack(q2_pi_, axis=1)
######################################
# Experience buffer
# replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size)
replay_buffer_rnn = ReplayBuffer_RNN(Lb=Lb, Lt=Lt, hc_dim=hc_dim, obs_dim=obs_dim, act_dim=act_dim,
size=replay_size)
# Count variables
# var_counts = tuple(core.count_vars(scope) for scope in
# ['main/pi', 'main/q1', 'main/q2', 'rnn'])
# print(('\nNumber of parameters: \t pi: %d, \t' + 'q1: %d, \t q2: %d, \t rnn: %d\n') % var_counts)
# print('Number of parameters: \t Total: %d\n' % sum(var_counts))
######
if alpha == 'auto':
target_entropy = (-np.prod(env.action_space.shape))
log_alpha = tf.get_variable('log_alpha', dtype=tf.float32, initializer=0.0)
alpha = tf.exp(log_alpha)
alpha_loss = tf.reduce_mean(-log_alpha * tf.stop_gradient(logp_pi + target_entropy))
alpha_optimizer = tf.train.AdamOptimizer(learning_rate=lr * h0, name='alpha_optimizer')
train_alpha_op = alpha_optimizer.minimize(loss=alpha_loss, var_list=[log_alpha])
######
# Min Double-Q:
min_q_pi_ = tf.minimum(q1_pi_, q2_pi_)
# Targets for Q and V regression
v_backup = tf.stop_gradient(min_q_pi_ - alpha * logp_pi2)
q_backup = r_ph + gamma * (1 - d_ph) * v_backup
# Soft actor-critic losses
pi_loss = tf.reduce_mean(obs12_train * (alpha * logp_pi - q1_pi))
q1_loss = 0.5 * tf.reduce_mean(obs12_train * (q_backup - q1) ** 2)
q2_loss = 0.5 * tf.reduce_mean(obs12_train * (q_backup - q2) ** 2)
value_loss = q1_loss + q2_loss
# Policy train op
# (has to be separate from value train op, because q1_pi appears in pi_loss)
pi_optimizer = tf.train.AdamOptimizer(learning_rate=lr)
pi_params = get_vars('main/pi')
train_pi_op = pi_optimizer.minimize(pi_loss, var_list=pi_params)
# Value train op
# (control dep of train_pi_op because sess.run otherwise evaluates in nondeterministic order)
value_optimizer = tf.train.AdamOptimizer(learning_rate=lr)
value_params = get_vars('main/q') + get_vars('rnn')
with tf.control_dependencies([train_pi_op]):
train_value_op = value_optimizer.minimize(value_loss, var_list=value_params)
# Polyak averaging for target variables
# (control flow because sess.run otherwise evaluates in nondeterministic order)
with tf.control_dependencies([train_value_op]):
target_update = tf.group([tf.assign(v_targ, polyak * | |
collections
class Stash(ABC):
    """Abstract CRUD container with ``dict``-like semantics.

    Concrete implementations usually persist to the file system, although
    any backing store works.  Instances are iterable; iteration yields
    ``(key, value)`` tuples.

    Note the subtle differences from a plain ``dict``: indexing may *force*
    creation of the requested item, while ``get`` relaxes that creation
    mechanism for some implementations.
    """

    @abstractmethod
    def load(self, name: str):
        """Load and return the value stored under key ``name``."""
        pass

    def get(self, name: str, default=None):
        """Return the value for ``name``, or ``default`` if it loads as None."""
        ret = self.load(name)
        return default if ret is None else ret

    @abstractmethod
    def exists(self, name: str) -> bool:
        """Return ``True`` if data with key ``name`` exists."""
        pass

    @abstractmethod
    def dump(self, name: str, inst):
        """Persist value ``inst`` under key ``name``."""
        pass

    @abstractmethod
    def delete(self, name=None):
        """Delete the data for ``name``, or the entire resource if ``name``
        is not given.
        """
        pass

    def clear(self):
        """Delete every item from the stash.

        *Important*: exercise caution with this method, of course.
        """
        for k in self.keys():
            self.delete(k)

    @abstractmethod
    def keys(self) -> List[str]:
        """Return an iterable of the keys in the collection."""
        pass

    def key_groups(self, n):
        """Return an iterable of groups of keys, each of size at least ``n``."""
        return chunks(self.keys(), n)

    def values(self):
        """Return an iterable over the stash's values."""
        return (self[k] for k in self.keys())

    def items(self):
        """Return an iterable of ``(key, value)`` pairs for all items."""
        return ((k, self[k]) for k in self.keys())

    def __getitem__(self, key):
        # record existence *before* loading: load may create the item
        existed = self.exists(key)
        item = self.load(key)
        if item is None:
            raise KeyError(key)
        # persist anything that was created on the fly by load
        if not existed:
            self.dump(key, item)
        return item

    def __setitem__(self, key, value):
        self.dump(key, value)

    def __delitem__(self, key):
        self.delete(key)

    def __contains__(self, key):
        return self.exists(key)

    def __iter__(self):
        return ((k, self[k]) for k in self.keys())

    def __len__(self):
        return sum(1 for _ in self.keys())
class CloseableStash(Stash):
    """Any stash that has a resource that needs to be closed (e.g. a file
    handle or connection); callers invoke :meth:`close` when done.
    """
    @abstractmethod
    def close(self):
        "Close all resources created by the stash."
        pass
class DelegateStash(CloseableStash, metaclass=ABCMeta):
    """Delegate pattern.  It can also be used as a no-op if no delegate is
    given.

    A minimum functioning implementation needs the ``load`` and ``keys``
    methods overriden.  Inheriting and implementing a ``Stash`` such as this
    is usually used as the ``factory`` in a ``FactoryStash``.
    """
    def __init__(self, delegate: Stash = None):
        # validate eagerly so a misconfigured delegate fails at construction
        # time rather than at the first delegated call
        if delegate is not None and not isinstance(delegate, Stash):
            raise ValueError(f'not a stash: {delegate}')
        self.delegate = delegate
    def __getattr__(self, attr, default=None):
        # Invoked only when normal attribute lookup fails; forwards the miss
        # to the delegate.  NOTE(review): ``default`` is never supplied by
        # Python's attribute protocol, so it appears to be dead.
        try:
            # __getattribute__ avoids re-entering this method recursively
            delegate = super(DelegateStash, self).__getattribute__('delegate')
        except AttributeError:
            raise AttributeError(
                f"'{self.__class__.__name__}' object has no attribute '{attr}'; delegate not set'")
        if delegate is not None:
            return delegate.__getattribute__(attr)
        raise AttributeError(
            f"'{self.__class__.__name__}' object has no attribute '{attr}''")
    def load(self, name: str):
        # returns None implicitly when no delegate is configured
        if self.delegate is not None:
            return self.delegate.load(name)
    def get(self, name: str, default=None):
        if self.delegate is None:
            # fall back to Stash.get, which routes through our load()
            return super(DelegateStash, self).get(name, default)
        else:
            return self.delegate.get(name, default)
    def exists(self, name: str):
        if self.delegate is not None:
            return self.delegate.exists(name)
        else:
            return False
    def dump(self, name: str, inst):
        if self.delegate is not None:
            return self.delegate.dump(name, inst)
    def delete(self, name=None):
        if self.delegate is not None:
            self.delegate.delete(name)
    def keys(self):
        if self.delegate is not None:
            return self.delegate.keys()
        return ()
    def clear(self):
        # clear via our own key view first, then the delegate's backing store
        super(DelegateStash, self).clear()
        if self.delegate is not None:
            self.delegate.clear()
    def close(self):
        if self.delegate is not None:
            return self.delegate.close()
class KeyLimitStash(DelegateStash):
    """Cap the number of keys reported by the delegate; useful for debugging.

    Since iteration is usually driven by the key mapping, this effectively
    limits iteration output as well for most stashes.
    """

    def __init__(self, delegate: Stash, n_limit=10):
        super(KeyLimitStash, self).__init__(delegate)
        self.n_limit = n_limit

    def keys(self):
        # lazily truncate the delegate's keys to the first ``n_limit``
        return it.islice(super(KeyLimitStash, self).keys(), self.n_limit)
class PreemptiveStash(DelegateStash):
    """Provide support for preemptively creating data in a stash.

    Whether data exists is detected once and memoized in ``_has_data``.
    """

    @property
    def has_data(self):
        """Whether or not the stash has any data available."""
        return self._calculate_has_data()

    def _calculate_has_data(self):
        """Return ``True`` if the delegate has at least one key (memoized)."""
        if not hasattr(self, '_has_data'):
            key_iter = iter(self.delegate.keys())
            try:
                next(key_iter)
            except StopIteration:
                self._has_data = False
            else:
                self._has_data = True
        return self._has_data

    def _reset_has_data(self):
        """Forget the memoized data-presence state."""
        try:
            delattr(self, '_has_data')
        except AttributeError:
            pass

    def _set_has_data(self, has_data=True):
        """Force the data-presence state to ``has_data``."""
        self._has_data = has_data

    def clear(self):
        # only bother clearing when there is actually something to clear
        if self._calculate_has_data():
            super(PreemptiveStash, self).clear()
        self._reset_has_data()
class FactoryStash(PreemptiveStash):
    """A stash that defers creation of missing items to a ``factory`` stash.

    Values found in the ``delegate`` are returned directly; anything missing
    is produced by the ``factory``.
    """

    def __init__(self, delegate, factory, enable_preemptive=True):
        """Initialize.

        :param delegate: the stash used for persistence
        :type delegate: Stash
        :param factory: the stash used to create using ``load`` and ``keys``
        :type factory: Stash
        """
        super(FactoryStash, self).__init__(delegate)
        self.factory = factory
        self.enable_preemptive = enable_preemptive

    def _calculate_has_data(self) -> bool:
        # preemptive data detection can be switched off wholesale
        if not self.enable_preemptive:
            return False
        return super(FactoryStash, self)._calculate_has_data()

    def load(self, name: str):
        obj = super(FactoryStash, self).load(name)
        if obj is None:
            # a miss means the cached data state is stale; defer to factory
            self._reset_has_data()
            obj = self.factory.load(name)
        return obj

    def keys(self) -> List[str]:
        if self.has_data:
            return super(FactoryStash, self).keys()
        return self.factory.keys()
class OneShotFactoryStash(PreemptiveStash, metaclass=ABCMeta):
    """A stash populated in one shot by a ``worker``.

    The worker is either a callable returning an iterable, or an iterable
    itself, of ``(key, object)`` pairs; each pair is dumped to the delegate
    the first time data is needed.
    """

    def __init__(self, worker, *args, **kwargs):
        """Initialize the stash.

        :param worker: either a callable (i.e. function) or an iterable that
                       yields tuples or lists of (key, object)
        """
        super(OneShotFactoryStash, self).__init__(*args, **kwargs)
        self.worker = worker

    def _process_work(self):
        """Run the worker and dump everything it produces to the delegate."""
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug(f'processing with {type(self.worker)}')
        itr = self.worker() if callable(self.worker) else self.worker
        for id, obj in itr:
            self.delegate.dump(id, obj)

    def prime(self):
        """Ensure the stash is populated, invoking the worker if it is not."""
        has_data = self.has_data
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug(f'asserting data: {has_data}')
        if not has_data:
            with time(f'processing work in {self}'):
                self._process_work()
            self._reset_has_data()

    def get(self, name: str, default=None):
        self.prime()
        return super(OneShotFactoryStash, self).get(name, default)

    def load(self, name: str):
        self.prime()
        return super(OneShotFactoryStash, self).load(name)

    def keys(self):
        self.prime()
        return super(OneShotFactoryStash, self).keys()
class OrderedKeyStash(DelegateStash):
    """Return keys from the delegate in a deterministic order.

    Ordering the keys usually also fixes the iteration order of values,
    since value iteration is driven by :meth:`keys`.
    """

    def __init__(self, delegate: Stash, order_function: Callable = int):
        super(OrderedKeyStash, self).__init__(delegate)
        self.order_function = order_function

    def keys(self) -> List[str]:
        all_keys = super(OrderedKeyStash, self).keys()
        if not self.order_function:
            return sorted(all_keys)
        return sorted(all_keys, key=self.order_function)
class DictionaryStash(DelegateStash):
    """A stash backed by an in-memory ``dict``.

    If no dictionary is supplied to the initializer, a fresh empty one is
    created.
    """

    def __init__(self, data: dict = None):
        super(DictionaryStash, self).__init__()
        self._data = {} if data is None else data

    @property
    def data(self):
        # the backing dictionary itself
        return self._data

    def load(self, name: str):
        return self.data.get(name)

    def get(self, name: str, default=None):
        return self.data.get(name, default)

    def exists(self, name: str):
        return name in self.data

    def dump(self, name: str, inst):
        self.data[name] = inst

    def delete(self, name=None):
        del self.data[name]

    def keys(self):
        return self.data.keys()

    def clear(self):
        self.data.clear()
        super(DictionaryStash, self).clear()

    def __getitem__(self, key):
        # direct indexing bypasses the create-on-load logic of Stash
        return self.data[key]
class CacheStash(DelegateStash):
    """Cache reads from a (presumably slower) delegate stash in a faster one.

    By default a :class:`DictionaryStash` serves as the cache.
    """

    def __init__(self, delegate, cache_stash=None, read_only=False):
        """Initialize.

        :param delegate: the underlying persistence stash
        :param cache_stash: a stash used for caching (defaults to
                            ``DictionaryStash``)
        :param read_only: if ``True``, make no changes to ``delegate``
        """
        super(CacheStash, self).__init__(delegate)
        self.cache_stash = DictionaryStash() if cache_stash is None else cache_stash
        self.read_only = read_only

    def load(self, name: str):
        # serve from the cache when possible; otherwise pull through and cache
        if self.cache_stash.exists(name):
            return self.cache_stash.load(name)
        obj = self.delegate.load(name)
        self.cache_stash.dump(name, obj)
        return obj

    def exists(self, name: str):
        return self.cache_stash.exists(name) or self.delegate.exists(name)

    def delete(self, name=None):
        if self.cache_stash.exists(name):
            self.cache_stash.delete(name)
        # the backing store is only touched when writes are allowed
        if not self.read_only:
            self.delegate.delete(name)

    def clear(self):
        if not self.read_only:
            # clears the delegate via the inherited implementation
            super(CacheStash, self).clear()
        self.cache_stash.clear()
class DirectoryStash(Stash):
"""Creates a pickeled data file with a file name in a directory with a given
pattern across all instances.
"""
def __init__(self, create_path: Path, pattern='{name}.dat'):
"""Create a stash.
:param create_path: the directory of where to store the files
:param pattern: the file name portion with ``name`` populating to the
key of the data value
"""
self.pattern = pattern
self.create_path = create_path
def _create_path_dir(self):
self.create_path.mkdir(parents=True, exist_ok=True)
def _get_instance_path(self, | |
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Tensorflow implementation of a LennardJones Potential for a single element.
This is a standalone module by design.
"""
import numpy as np
import tensorflow as tf
from dap.tf.neighborlist import get_neighbors_oneway
from ase.calculators.calculator import Calculator, all_changes
import ase.db
def get_Rij(positions, cell, mask, cutoff_radius):
  """Get distances to neighboring atoms with periodic boundary conditions.

  The way this function works is it tiles space with unit cells to at least
  fill a sphere with a radius of cutoff_radius. That means some atoms will be
  outside the cutoff radius. Those are included in the results. Then we get
  distances to all atoms in the tiled space. This is always the same number
  for every atom, so we have consistent sized arrays.

  This function is specific to the Lennard Jones potential as noted in the
  comments below.

  Args:
    positions: array-like shape=(numatoms, 3)
      Array of cartesian coordinates of atoms in a unit cell.
    cell: array-like shape=(3, 3)
      Array of unit cell vectors in cartesian basis. Each row is a unit cell
      vector.
    mask: array-like (numatoms,)
      ones for atoms, zero for padded positions
    cutoff_radius: float
      The cutoff_radius we want atoms within.

  Returns:
    A flattened array of distances to all the neighbors.

  Notes:
    One of the distances is equal to 0.0, which corresponds to Rii. This
    distance is problematic for the gradients, which are undefined for these
    points. I have not found a masking strategy to eliminate these points
    while keeping the gradients besides the one used here. This is not an
    issue with other potentials that don't have a 1/r form like this one does.

    This code was adapted from:
    Related: pydoc:pymatgen.core.lattice.Lattice.get_points_in_sphere
  """
  with tf.name_scope("get_Rij"):
    positions = tf.convert_to_tensor(positions)
    cell = tf.convert_to_tensor(cell)
    # cast the mask to the cell dtype so it can multiply distances below
    mask = tf.convert_to_tensor(mask, dtype=cell.dtype)
    with tf.name_scope("get_offsets"):
      # Next we get the reciprocal unit cell, which will be used to compute the
      # unit cell offsets required to tile space inside the sphere.
      inverse_cell = tf.matrix_inverse(cell)
      # fractional coordinates wrapped into [0, 1)
      fcoords = tf.mod(
          tf.matmul(positions, inverse_cell), tf.ones_like(positions))
      recp_len = tf.norm(inverse_cell, axis=0)
      # number of cell repeats needed along each axis to cover the sphere
      nmax = cutoff_radius * recp_len
      mins = tf.reduce_min(tf.floor(fcoords - nmax), axis=0)
      maxs = tf.reduce_max(tf.ceil(fcoords + nmax), axis=0)
      # Now we generate a set of cell offsets. We start with the repeats in each
      # unit cell direction.
      arange = tf.range(mins[0], maxs[0])
      brange = tf.range(mins[1], maxs[1])
      crange = tf.range(mins[2], maxs[2])
      # Then we expand them in each dimension
      xhat = tf.constant([1.0, 0.0, 0.0], dtype=inverse_cell.dtype)
      yhat = tf.constant([0.0, 1.0, 0.0], dtype=inverse_cell.dtype)
      zhat = tf.constant([0.0, 0.0, 1.0], dtype=inverse_cell.dtype)
      arange = arange[:, None] * xhat[None, :]
      brange = brange[:, None] * yhat[None, :]
      crange = crange[:, None] * zhat[None, :]
      # And combine them to get an offset vector for each cell
      offsets = (
          arange[:, None, None] + brange[None, :, None] + crange[None, None, :])
      offsets = tf.reshape(offsets, (-1, 3))
    # Now we have a vector of unit cell offsets (offset_index, 3) in the inverse
    # unit cell basis. We convert that to cartesian coordinate offsets here.
    cart_offsets = tf.matmul(offsets, cell)
    # we need to offset each atom coordinate by each offset.
    # This array is (atom_index, offset, 3)
    shifted_cart_coords = positions[:, None] + cart_offsets[None, :]
    # Next, we subtract each position from the array of positions.
    # Broadcasting yields shape (atom_i, atom_j, offset, 3).
    relative_positions = shifted_cart_coords - positions[:, None, None]
    # This is the distance squared, shape (atom_i, atom_j, offset).
    Rij2 = tf.reduce_sum(relative_positions**2, axis=3)
    # We zero out masked distances. This is subtle. We have to zero out parts of
    # two dimensions. First, all the entries in the first dimension which are
    # not atoms must be zeroed, and then, all the entries in the second
    # dimension which aren't atoms have to be zeroed.
    # mask[:, None] broadcasts over axis 1 (atom_j); mask[:, None, None]
    # broadcasts over axis 0 (atom_i).
    Rij2 *= mask[:, None] * mask[:, None, None]
    # Since we assume the atoms are all the same we can flatten it. It turns out
    # that the array will get flattened anyway because of the boolean mask in
    # the return. This effectively removes elements in some of the subarrays so
    # the shape is no longer constant, causing the array to be flattened.
    Rij2 = tf.reshape(Rij2, [-1])
    # We exclude the self-interaction by only considering atoms with a distance
    # greater than 0. For this potential, it is necessary to do this here to
    # avoid nan's in the gradients.
    #
    # It is not necessary to take the square root here, since we later compute
    # 1/Rij^6. But, this function was originally intended to be used for other
    # potentials where Rij is used directly, so we do that here.
    #
    # We do not mask out the values greater than cutoff_radius here. That is
    # done later in the energy function.
    return tf.sqrt(tf.boolean_mask(Rij2, Rij2 > 0.0))
def energy(positions, cell, mask=None, strain=None):
    """Compute the energy of a Lennard-Jones system.

    Builds a TF graph for a truncated-and-shifted Lennard-Jones potential.
    The LJ parameters sigma and epsilon are graph variables (initialized to
    1.0) shared across repeated calls via AUTO_REUSE, so they can be fit by
    training. The cutoff radius is fixed at 3 * sigma, and the potential is
    shifted by its value at the cutoff so it goes to zero there.

    Args:
      positions: array-like shape=(numatoms, 3)
        Array of cartesian coordinates of atoms in a unit cell.
      cell: array-like shape=(3, 3)
        Array of unit cell vectors in cartesian basis. Each row is a unit cell
        vector.
      mask: array-like (numatoms,)
        ones for atoms, zero for padded positions. Defaults to all ones.
      strain: array-like shape=(3, 3)
        Array of strains to compute the energy at. Defaults to zero strain.

    Returns: float
      The total energy from the Lennard Jones potential (a scalar tensor).
    """
    with tf.name_scope("LennardJones"):
        with tf.name_scope("setup"):
            positions = tf.convert_to_tensor(positions)
            cell = tf.convert_to_tensor(cell)
            if mask is None:
                mask = tf.ones_like(positions[:, 0])
            mask = tf.convert_to_tensor(mask)
            if strain is None:
                strain = tf.zeros_like(cell)
            strain = tf.convert_to_tensor(strain)
            # Apply the strain to both the cell and the coordinates so that
            # derivatives with respect to strain give the stress.
            strained_cell = tf.matmul(cell, tf.eye(3, dtype=cell.dtype) + strain)
            strained_positions = tf.matmul(positions,
                                           tf.eye(3, dtype=cell.dtype) + strain)
        # sigma and epsilon are trainable parameters; AUTO_REUSE makes every
        # call to this function share the same two variables.
        with tf.variable_scope("sigma", reuse=tf.AUTO_REUSE):
            sigma = tf.get_variable(
                "sigma",
                dtype=cell.dtype,
                initializer=tf.constant(1.0, dtype=cell.dtype))
        with tf.variable_scope("epsilon", reuse=tf.AUTO_REUSE):
            epsilon = tf.get_variable(
                "epsilon",
                dtype=cell.dtype,
                initializer=tf.constant(1.0, dtype=cell.dtype))
        rc = 3 * sigma
        with tf.name_scope("calculate_energy"):
            # e0 is the (unshifted) LJ energy at the cutoff; subtracting it per
            # pair shifts the potential to zero at rc.
            e0 = 4 * epsilon * ((sigma / rc)**12 - (sigma / rc)**6)
            energy = 0.0
            d = get_Rij(strained_positions, strained_cell, mask, rc)
            # Pairs beyond the cutoff are masked out here (get_Rij does not
            # apply the cutoff itself).
            neighbor_mask = tf.less_equal(d, tf.ones_like(d) * rc)
            energy -= e0 * tf.reduce_sum(tf.cast(neighbor_mask, e0.dtype))
            c6 = (sigma**2 / tf.boolean_mask(d, neighbor_mask)**2)**3
            c12 = c6**2
            energy += tf.reduce_sum(4 * epsilon * (c12 - c6))
        # Each i-j pair is counted twice (ij and ji), so halve the sum.
        return energy / 2.0
def forces(positions, cell, mask=None, strain=None):
    """Compute the forces as the negative gradient of the LJ energy.

    Args:
      positions: array-like shape=(numatoms, 3)
        Cartesian coordinates of atoms in a unit cell.
      cell: array-like shape=(3, 3)
        Unit cell vectors in cartesian basis, one vector per row.
      mask: array-like (numatoms,)
        ones for atoms, zero for padded positions. Defaults to all ones.
      strain: array-like shape=(3, 3)
        Strains to compute the forces at. Defaults to zero strain.

    Returns:
      array: shape=(natoms, 3) tensor of forces.
    """
    with tf.name_scope("forces"):
        positions = tf.convert_to_tensor(positions)
        cell = tf.convert_to_tensor(cell)
        mask = tf.convert_to_tensor(
            tf.ones_like(positions[:, 0]) if mask is None else mask)
        if strain is None:
            strain = tf.zeros_like(cell)
        # F = -dE/dR; tf.gradients returns one tensor per requested variable.
        total_energy = energy(positions, cell, mask, strain)
        grad, = tf.gradients(-total_energy, positions)
        return grad
def stress(positions, cell, mask=None, strain=None):
    """Compute the stress tensor in Voigt notation.

    Args:
      positions: array-like shape=(numatoms, 3)
        Cartesian coordinates of atoms in a unit cell.
      cell: array-like shape=(3, 3)
        Unit cell vectors in cartesian basis, one vector per row.
      mask: array-like (numatoms,)
        ones for atoms, zero for padded positions. Defaults to all ones.
      strain: array-like shape=(3, 3)
        Strains to compute the stress at. Defaults to zero strain.

    Returns:
      The stress components [sxx, syy, szz, syz, sxz, sxy]
      array: shape=(6,)
    """
    with tf.name_scope("stress"):
        with tf.name_scope("setup"):
            positions = tf.convert_to_tensor(positions)
            cell = tf.convert_to_tensor(cell)
            mask = tf.convert_to_tensor(
                tf.ones_like(positions[:, 0]) if mask is None else mask)
            if strain is None:
                strain = tf.zeros_like(cell)
        with tf.name_scope("get_stress"):
            # Stress is (1/V) dE/d(strain); V from |det(cell)|.
            volume = tf.abs(tf.matrix_determinant(cell))
            dE_dstrain, = tf.gradients(
                energy(positions, cell, mask, strain), strain)
            # Flatten the 3x3 tensor and pick the six Voigt components.
            return tf.gather(
                tf.reshape(dE_dstrain / volume, (9,)), [0, 4, 8, 5, 2, 1])
def energy_batch(POSITIONS,
CELLS,
MASKS,
| |
import ast
import copy
import json
import logging
import uuid
from datetime import datetime
import pytz
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from django.db.models import Count
from django.db.models import F
from django.db.models import Max
from django.db.models import Q
from django.db.models import Sum
from django.http import HttpResponse
from django.http import HttpResponseBadRequest
from django.http import HttpResponseNotFound
from django.utils.translation import ugettext as _
from le_utils.constants import content_kinds
from le_utils.constants import format_presets
from le_utils.constants import roles
from rest_framework.authentication import SessionAuthentication
from rest_framework.authentication import TokenAuthentication
from rest_framework.decorators import api_view
from rest_framework.decorators import authentication_classes
from rest_framework.decorators import permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from contentcuration.models import AssessmentItem
from contentcuration.models import Channel
from contentcuration.models import ContentNode
from contentcuration.models import ContentTag
from contentcuration.models import File
from contentcuration.models import generate_storage_url
from contentcuration.models import Language
from contentcuration.models import License
from contentcuration.models import PrerequisiteContentRelationship
from contentcuration.serializers import ContentNodeEditSerializer
from contentcuration.serializers import ContentNodeSerializer
from contentcuration.serializers import SimplifiedContentNodeSerializer
from contentcuration.utils.files import duplicate_file
@authentication_classes((TokenAuthentication, SessionAuthentication))
@permission_classes((IsAuthenticated,))
def get_node_diff(request, channel_id):
    """Return the nodes in this channel that differ from their import sources.

    Compares every copied node in the channel's main tree against the node it
    was originally imported from (matched by content_id) and reports the pairs
    that differ in metadata, tags, files, or assessment items.

    NOTE(review): @authentication_classes/@permission_classes only take effect
    when combined with DRF's @api_view decorator, which is absent here — the
    sibling views below use the same pattern; confirm this is intentional.

    Returns an HttpResponse whose JSON body has "original" (copied nodes) and
    "changed" (their source nodes), each a pre-serialized JSON string.
    """
    original = []   # Currently imported nodes
    changed = []    # Nodes from original node
    fields_to_check = ['title', 'description', 'license', 'license_description', 'copyright_holder', 'author', 'extra_fields', 'language', 'role_visibility']
    assessment_fields_to_check = ['type', 'question', 'hints', 'answers', 'order', 'raw_data', 'source_url', 'randomize']

    current_tree_id = Channel.objects.get(pk=channel_id).main_tree.tree_id
    nodes = ContentNode.objects.prefetch_related('assessment_items').prefetch_related('files').prefetch_related('tags')

    # A copied node is one whose node_id differs from its original_source_node_id.
    copied_nodes = nodes.filter(tree_id=current_tree_id).exclude(original_source_node_id=F('node_id'))
    channel_ids = copied_nodes.values_list('original_channel_id', flat=True).exclude(original_channel_id=channel_id).distinct()
    tree_ids = Channel.objects.filter(pk__in=channel_ids).values_list("main_tree__tree_id", flat=True)
    original_node_ids = copied_nodes.values_list('original_source_node_id', flat=True).distinct()
    original_nodes = nodes.filter(tree_id__in=tree_ids, node_id__in=original_node_ids)

    # Use dictionary for faster lookup speed
    content_id_mapping = {n.content_id: n for n in original_nodes}

    for copied_node in copied_nodes:
        node = content_id_mapping.get(copied_node.content_id)

        if node:
            # Check lengths, metadata, tags, files, and assessment items.
            # Short-circuits left-to-right: cheap count comparisons first,
            # then field-by-field getattr checks, then the exclusion queries.
            node_changed = node.assessment_items.count() != copied_node.assessment_items.count() or \
                node.files.count() != copied_node.files.count() or \
                node.tags.count() != copied_node.tags.count() or \
                any(filter(lambda f: getattr(node, f, None) != getattr(copied_node, f, None), fields_to_check)) or \
                node.tags.exclude(tag_name__in=copied_node.tags.values_list('tag_name', flat=True)).exists() or \
                node.files.exclude(checksum__in=copied_node.files.values_list('checksum', flat=True)).exists() or \
                node.assessment_items.exclude(assessment_id__in=copied_node.assessment_items.values_list('assessment_id', flat=True)).exists()

            # Check individual assessment items (exercises only); stop at the
            # first difference found.
            if not node_changed and node.kind_id == content_kinds.EXERCISE:
                for ai in node.assessment_items.all():
                    source_ai = copied_node.assessment_items.filter(assessment_id=ai.assessment_id).first()
                    if source_ai:
                        node_changed = node_changed or any(filter(lambda f: getattr(ai, f, None) != getattr(source_ai, f, None), assessment_fields_to_check))
                        if node_changed:
                            break

            if node_changed:
                original.append(copied_node)
                changed.append(node)

    serialized_original = JSONRenderer().render(SimplifiedContentNodeSerializer(original, many=True).data)
    serialized_changed = JSONRenderer().render(SimplifiedContentNodeSerializer(changed, many=True).data)

    return HttpResponse(json.dumps({
        "original": serialized_original,
        "changed": serialized_changed,
    }))
def create_new_node(request):
    """Create a new ContentNode under the orphanage root from POSTed JSON.

    Expects a JSON body with node metadata; responds with the serialized
    new node. Non-POST requests are rejected.
    """
    if request.method != 'POST':
        return HttpResponseBadRequest("Only POST requests are allowed on this endpoint.")

    payload = json.loads(request.body)
    # Look the license up by name; fall back to the configured default when
    # no matching license exists (preference may not have been set).
    matched_license = License.objects.filter(license_name=payload.get('license_name')).first()
    chosen_license_id = matched_license.pk if matched_license else settings.DEFAULT_LICENSE

    node = ContentNode.objects.create(
        kind_id=payload.get('kind'),
        title=payload.get('title'),
        author=payload.get('author'),
        aggregator=payload.get('aggregator'),
        provider=payload.get('provider'),
        copyright_holder=payload.get('copyright_holder'),
        license_id=chosen_license_id,
        license_description=payload.get('license_description'),
        parent_id=settings.ORPHANAGE_ROOT_ID,
    )
    return HttpResponse(JSONRenderer().render(ContentNodeEditSerializer(node).data))
@api_view(['GET'])
def get_prerequisites(request, get_prerequisites, ids):
    """Return prerequisite (and optionally postrequisite) data for nodes.

    `ids` is a comma-separated list of node pks. When the `get_prerequisites`
    URL flag is the string "true", postrequisites are collected as well and
    the flat prerequisite mapping is merged; otherwise each node maps to its
    own prerequisite mapping.
    """
    target_nodes = ContentNode.objects.prefetch_related('prerequisite').filter(pk__in=ids.split(","))
    include_postrequisites = get_prerequisites == "true"

    prerequisite_mapping = {}
    postrequisite_mapping = {}
    tree_nodes = []
    for node in target_nodes:
        prereqs, prereq_map = node.get_prerequisites()
        if include_postrequisites:
            postreqs, postreq_map = node.get_postrequisites()
            postrequisite_mapping.update(postreq_map)
            prerequisite_mapping.update(prereq_map)
            tree_nodes += prereqs + postreqs + [node]
        else:
            prerequisite_mapping[node.pk] = prereq_map
            tree_nodes += prereqs + [node]

    return HttpResponse(json.dumps({
        "prerequisite_mapping": prerequisite_mapping,
        "postrequisite_mapping": postrequisite_mapping,
        "prerequisite_tree_nodes": JSONRenderer().render(SimplifiedContentNodeSerializer(tree_nodes, many=True).data),
    }))
@api_view(['GET'])
def get_total_size(request, ids):
    """Sum the distinct file sizes of all descendants of the given nodes.

    Unpublished exercises are excluded; duplicate files (same checksum and
    size) are counted once.
    """
    queryset = ContentNode.objects.prefetch_related('assessment_items', 'files', 'children')
    queryset = queryset.exclude(kind_id=content_kinds.EXERCISE, published=False)
    totals = (queryset.filter(id__in=ids.split(","))
              .get_descendants(include_self=True)
              .values('files__checksum', 'files__file_size')
              .distinct()
              .aggregate(resource_size=Sum('files__file_size')))
    return HttpResponse(json.dumps({'success': True, 'size': totals['resource_size'] or 0}))
@api_view(['GET'])
def get_nodes_by_ids(request, ids):
    """Serialize the requested nodes, deferring source-tracking columns."""
    deferred_columns = ('node_id', 'original_source_node_id', 'source_node_id',
                        'content_id', 'original_channel_id', 'source_channel_id',
                        'source_id', 'source_domain', 'created', 'modified')
    queryset = (ContentNode.objects
                .prefetch_related('children', 'files', 'assessment_items', 'tags')
                .filter(pk__in=ids.split(","))
                .defer(*deferred_columns))
    return Response(ContentNodeSerializer(queryset, many=True).data)
def get_node_path(request, topic_id, tree_id, node_id):
    """Resolve a topic/node pair by (prefix of) node_id within a tree.

    `topic_id` and `node_id` are node_id prefixes (matched via startswith).
    If `topic_id` resolves to a non-topic node, that node is treated as the
    target and the path is its ancestry; otherwise `node_id` (when given)
    names the target and the path is the topic's ancestry including itself.

    Returns JSON with 'path' (serialized ancestor list, closest first),
    'node' (serialized target, or falsy when there is none), and
    'parent_node_id' (only meaningful in the non-topic branch).
    """
    try:
        topic = ContentNode.objects.prefetch_related('children').get(node_id__startswith=topic_id, tree_id=tree_id)

        if topic.kind_id != content_kinds.TOPIC:
            # topic_id actually pointed at a leaf: re-fetch it with the
            # heavier prefetches and use its ancestry as the path.
            node = ContentNode.objects.prefetch_related('files', 'assessment_items', 'tags').get(node_id__startswith=topic_id, tree_id=tree_id)
            nodes = node.get_ancestors(ascending=True)
        else:
            # node may be None/'' when no node_id was supplied in the URL.
            node = node_id and ContentNode.objects.prefetch_related('files', 'assessment_items', 'tags').get(node_id__startswith=node_id, tree_id=tree_id)
            nodes = topic.get_ancestors(include_self=True, ascending=True)

        return HttpResponse(json.dumps({
            'path': JSONRenderer().render(ContentNodeSerializer(nodes, many=True).data),
            'node': node and JSONRenderer().render(ContentNodeEditSerializer(node).data),
            'parent_node_id': topic.kind_id != content_kinds.TOPIC and node.parent and node.parent.node_id
        }))
    except ObjectDoesNotExist:
        return HttpResponseNotFound("Invalid URL: the referenced content does not exist in this channel.")
@api_view(['GET'])
def get_nodes_by_ids_simplified(request, ids):
    """Serialize the requested nodes with the lightweight serializer."""
    queryset = ContentNode.objects.prefetch_related('children')
    matched = queryset.filter(pk__in=ids.split(","))
    return Response(SimplifiedContentNodeSerializer(matched, many=True).data)
@api_view(['GET'])
def get_nodes_by_ids_complete(request, ids):
    """Serialize the requested nodes with the full edit serializer."""
    queryset = ContentNode.objects.prefetch_related('children', 'files', 'assessment_items', 'tags')
    matched = queryset.filter(pk__in=ids.split(","))
    return Response(ContentNodeEditSerializer(matched, many=True).data)
def get_channel_thumbnail(channel):
    """Return a thumbnail for a channel dict: inline base64 if present,
    else a storage URL for the thumbnail file, else None.
    """
    encoding = channel.get("thumbnail_encoding")
    if encoding and encoding.get("base64"):
        return encoding["base64"]
    thumbnail = channel.get("thumbnail")
    if thumbnail:
        return generate_storage_url(thumbnail)
def get_thumbnail(node):
    """Best-effort thumbnail for a node.

    Tries, in order: inline base64 from thumbnail_encoding, the node's
    thumbnail-preset file, and finally a per-kind placeholder image.
    """
    # thumbnail_encoding is stored as a Python-literal string; ast.literal_eval
    # is used because json.loads has had problems parsing it.
    if node.thumbnail_encoding:
        encoding = ast.literal_eval(node.thumbnail_encoding)
        inline = encoding.get("base64")
        if inline:
            return inline

    thumbnail_file = node.files.filter(preset__thumbnail=True).first()
    if thumbnail_file:
        return generate_storage_url(str(thumbnail_file))

    placeholder = "{}_placeholder.png".format(node.kind_id)
    return "/".join([settings.STATIC_URL.rstrip("/"), "img", placeholder])
# Timestamp format used for the cached topic-details freshness check below.
DATE_TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
@api_view(['GET'])
def get_topic_details(request, contentnode_id):
    """ Generates data for topic contents. Used for look-inside previews

    Serves a cached JSON payload when it is newer than the channel's last
    modification; otherwise recomputes the summary and refreshes the cache.

    Keyword arguments:
        contentnode_id (str): id of topic node to get details from
    """
    # Get nodes and channel
    node = ContentNode.objects.get(pk=contentnode_id)
    descendants = node.get_descendants().prefetch_related('children', 'files', 'tags')\
        .select_related('license', 'language')
    channel = node.get_channel()

    # If channel is a sushi chef channel, use date created for faster query
    # Otherwise, find the last time anything was updated in the channel
    last_update = channel.main_tree.created if channel and channel.ricecooker_version else \
        descendants.filter(changed=True)\
        .aggregate(latest_update=Max('modified'))\
        .get('latest_update')

    # See if the latest cached data is up to date since the last update to the channel
    cached_data = cache.get("details_{}".format(node.node_id))
    if cached_data and last_update:
        last_cache_update = datetime.strptime(json.loads(cached_data)['last_update'], DATE_TIME_FORMAT)
        if last_update.replace(tzinfo=None) < last_cache_update:
            return HttpResponse(cached_data)

    # Get resources
    resources = descendants.exclude(kind=content_kinds.TOPIC)

    # Get all copyright holders, authors, aggregators, and providers and split into lists.
    # Materialize the zip()/filter() results: on Python 3, zip objects have no
    # len() and filter objects are not JSON-serializable, so json.dumps below
    # would fail. list() is a no-op change on Python 2.
    creators = resources.values_list('copyright_holder', 'author', 'aggregator', 'provider')
    split_lst = list(zip(*creators))
    copyright_holders = list(filter(bool, set(split_lst[0]))) if len(split_lst) > 0 else []
    authors = list(filter(bool, set(split_lst[1]))) if len(split_lst) > 1 else []
    aggregators = list(filter(bool, set(split_lst[2]))) if len(split_lst) > 2 else []
    providers = list(filter(bool, set(split_lst[3]))) if len(split_lst) > 3 else []

    # Get sample pathway by getting longest path
    # Using resources.aggregate adds a lot of time, use values that have already been fetched
    max_level = max(resources.values_list('level', flat=True).distinct() or [0])
    deepest_node = resources.filter(level=max_level).first()
    pathway = list(deepest_node.get_ancestors()
                   .exclude(parent=None)
                   .values('title', 'node_id', 'kind_id')
                   ) if deepest_node else []
    sample_nodes = [
        {
            "node_id": n.node_id,
            "title": n.title,
            "description": n.description,
            "thumbnail": get_thumbnail(n),
        } for n in deepest_node.get_siblings(include_self=True)[0:4]
    ] if deepest_node else []

    # Get list of channels nodes were originally imported from (omitting the current channel)
    channel_id = channel and channel.id
    originals = resources.values("original_channel_id")\
        .annotate(count=Count("original_channel_id"))\
        .order_by("original_channel_id")
    originals = {c['original_channel_id']: c['count'] for c in originals}
    original_channels = Channel.objects.exclude(pk=channel_id)\
        .filter(pk__in=[k for k, v in originals.items()], deleted=False)\
        .values('id', 'name', 'thumbnail', 'thumbnail_encoding')
    original_channels = [{
        "id": c["id"],
        "name": "{}{}".format(c["name"], _(" (Original)") if channel_id == c["id"] else ""),
        "thumbnail": get_channel_thumbnail(c),
        "count": originals[c["id"]]
    } for c in original_channels]

    # Get tags from channel
    tags = list(ContentTag.objects.filter(tagged_content__pk__in=descendants.values_list('pk', flat=True))
                .values('tag_name')
                .annotate(count=Count('tag_name'))
                .order_by('tag_name'))

    # Get resource variables
    resource_count = resources.count() or 0
    resource_size = resources.values('files__checksum', 'files__file_size').distinct().aggregate(resource_size=Sum('files__file_size'))['resource_size'] or 0
    languages = list(set(descendants.exclude(language=None).values_list('language__native_name', flat=True)))
    accessible_languages = resources.filter(files__preset_id=format_presets.VIDEO_SUBTITLE)\
        .values_list('files__language_id', flat=True)
    accessible_languages = list(Language.objects.filter(id__in=accessible_languages).distinct().values_list('native_name', flat=True))
    licenses = list(set(resources.exclude(license=None).values_list('license__license_name', flat=True)))
    kind_count = list(resources.values('kind_id').annotate(count=Count('kind_id')).order_by('kind_id'))

    # Add "For Educators" booleans
    for_educators = {
        "coach_content": resources.filter(role_visibility=roles.COACH).exists(),
        "exercises": resources.filter(kind_id=content_kinds.EXERCISE).exists(),
    }

    # Serialize data
    data = json.dumps({
        "last_update": pytz.utc.localize(datetime.now()).strftime(DATE_TIME_FORMAT),
        "resource_count": resource_count,
        "resource_size": resource_size,
        "includes": for_educators,
        "kind_count": kind_count,
        "languages": languages,
        "accessible_languages": accessible_languages,
        "licenses": licenses,
        "tags": tags,
        "copyright_holders": copyright_holders,
        "authors": authors,
        "aggregators": aggregators,
        "providers": providers,
        "sample_pathway": pathway,
        "original_channels": original_channels,
        "sample_nodes": sample_nodes,
    })

    # Set cache with latest data (no timeout: invalidated by the freshness
    # check above, not by expiry).
    cache.set("details_{}".format(node.node_id), data, None)
    return HttpResponse(data)
@authentication_classes((TokenAuthentication, SessionAuthentication))
@permission_classes((IsAuthenticated,))
def delete_nodes(request):
    """Delete the posted nodes after verifying edit rights on the channel.

    Expects a JSON body with "nodes" (list of pks) and "channel_id". Marks
    each deleted node's parent as changed so the channel shows pending edits.
    Raises ObjectDoesNotExist when a required key is missing from the body.
    """
    # Fixed log message: it previously said "copy_node", copy-pasted from the
    # duplicate endpoints below.
    logging.debug("Entering the delete_nodes endpoint")
    if request.method != 'POST':
        return HttpResponseBadRequest("Only POST requests are allowed on this endpoint.")

    data = json.loads(request.body)
    try:
        nodes = data["nodes"]
        channel_id = data["channel_id"]
        request.user.can_edit(channel_id)
        nodes = ContentNode.objects.filter(pk__in=nodes)
        for node in nodes:
            # Flag the parent as changed so the channel reflects the deletion.
            if node.parent and not node.parent.changed:
                node.parent.changed = True
                node.parent.save()
            node.delete()
    except KeyError:
        raise ObjectDoesNotExist("Missing attribute from data: {}".format(data))
    return HttpResponse(json.dumps({'success': True}))
@authentication_classes((TokenAuthentication, SessionAuthentication))
@permission_classes((IsAuthenticated,))
def duplicate_nodes(request):
    """Copy the posted nodes under a new parent, preserving their order.

    Expects a JSON body with "node_ids", "channel_id", "target_parent" and an
    optional starting "sort_order". Responds with the serialized copies.
    """
    logging.debug("Entering the copy_node endpoint")
    if request.method != 'POST':
        return HttpResponseBadRequest("Only POST requests are allowed on this endpoint.")

    data = json.loads(request.body)
    try:
        node_ids = data["node_ids"]
        sort_order = data.get("sort_order") or 1
        channel_id = data["channel_id"]
        new_node_pks = []
        target_parent = ContentNode.objects.get(pk=data["target_parent"])
        channel = target_parent.get_channel()
        request.user.can_edit(channel and channel.pk)

        # Defer expensive MPTT tree rebuilds until every copy is in place.
        with transaction.atomic():
            with ContentNode.objects.disable_mptt_updates():
                for node_id in node_ids:
                    copy = duplicate_node_bulk(node_id, sort_order=sort_order,
                                               parent=target_parent,
                                               channel_id=channel_id,
                                               user=request.user)
                    new_node_pks.append(copy.pk)
                    sort_order += 1
    except KeyError:
        raise ObjectDoesNotExist("Missing attribute from data: {}".format(data))

    serialized = ContentNodeSerializer(ContentNode.objects.filter(pk__in=new_node_pks), many=True).data
    return HttpResponse(JSONRenderer().render(serialized))
@authentication_classes((TokenAuthentication, SessionAuthentication))
@permission_classes((IsAuthenticated,))
def duplicate_node_inline(request):
    """Copy one node next to itself, appending " (Copy)" to the new title.

    Expects a JSON body with "node_id", "channel_id" and "target_parent".
    Responds with the serialized copy (as a one-element list).
    """
    logging.debug("Entering the copy_node endpoint")
    if request.method != 'POST':
        return HttpResponseBadRequest("Only POST requests are allowed on this endpoint.")

    data = json.loads(request.body)
    try:
        node = ContentNode.objects.get(pk=data["node_id"])
        channel_id = data["channel_id"]
        target_parent = ContentNode.objects.get(pk=data["target_parent"])
        channel = target_parent.get_channel()
        request.user.can_edit(channel and channel.pk)

        new_node = None
        with transaction.atomic():
            with ContentNode.objects.disable_mptt_updates():
                # Slot the copy midway between the node and its next sibling,
                # or directly after it when it is the last child.
                next_sibling = node.get_next_sibling()
                if next_sibling:
                    sort_order = (node.sort_order + next_sibling.sort_order) / 2
                else:
                    sort_order = node.sort_order + 1
                new_node = duplicate_node_bulk(node, sort_order=sort_order,
                                               parent=target_parent,
                                               channel_id=channel_id,
                                               user=request.user)
                if not new_node.title.endswith(_(" (Copy)")):
                    new_node.title = new_node.title + _(" (Copy)")
                    new_node.save()

        return HttpResponse(JSONRenderer().render(ContentNodeSerializer(ContentNode.objects.filter(pk=new_node.pk), many=True).data))
    except KeyError:
        raise ObjectDoesNotExist("Missing attribute from data: {}".format(data))
def duplicate_node_bulk(node, sort_order=None, parent=None, channel_id=None, user=None):
if isinstance(node, int) or isinstance(node, basestring):
| |
<gh_stars>1-10
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the importmulti RPC."""
from test_framework.test_framework import BitcoinBridgeTestFramework
from test_framework.util import *
class ImportMultiTest (BitcoinBridgeTestFramework):
    def set_test_params(self):
        """Configure the framework: two nodes, starting from a clean chain."""
        self.num_nodes = 2
        self.setup_clean_chain = True
    def setup_network(self):
        """Start the nodes only; no inter-node connections are made here."""
        self.setup_nodes()
def run_test (self):
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
# keyword definition
PRIV_KEY = 'privkey'
PUB_KEY = 'pubkey'
ADDRESS_KEY = 'address'
SCRIPT_KEY = 'script'
node0_address1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
node0_address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
node0_address3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
#Check only one address
assert_equal(node0_address1['ismine'], True)
#Node 1 sync test
assert_equal(self.nodes[1].getblockcount(),1)
#Address Test - before import
address_info = self.nodes[1].validateaddress(node0_address1['address'])
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
# RPC importmulti -----------------------------------------------
# BitcoinBridge Address
self.log.info("Should import an address")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
watchonly_address = address['address']
watchonly_timestamp = timestamp
self.log.info("Should not import an invalid address")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": "not valid address",
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Invalid address')
# ScriptPubKey + internal
self.log.info("Should import a scriptPubKey with internal flag")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"internal": True
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + !internal
self.log.info("Should not import a scriptPubKey without internal flag")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Address + Public key + !Internal
self.log.info("Should import an address with public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"pubkeys": [ address['pubkey'] ]
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + Public key + internal
self.log.info("Should import a scriptPubKey with internal and with public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
request = [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"pubkeys": [ address['pubkey'] ],
"internal": True
}]
result = self.nodes[1].importmulti(request)
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + Public key + !internal
self.log.info("Should not import a scriptPubKey without internal and with public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
request = [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"pubkeys": [ address['pubkey'] ]
}]
result = self.nodes[1].importmulti(request)
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Address + Private key + !watchonly
self.log.info("Should import an address with private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ]
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], True)
assert_equal(address_assert['timestamp'], timestamp)
self.log.info("Should not import an address with private key if is already imported")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -4)
assert_equal(result[0]['error']['message'], 'The wallet already contains the private key for this address or script')
# Address + Private key + watchonly
self.log.info("Should not import an address with private key and with watchonly")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ],
"watchonly": True
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# ScriptPubKey + Private key + internal
self.log.info("Should import a scriptPubKey with internal and with private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ],
"internal": True
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], True)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + Private key + !internal
self.log.info("Should not import a scriptPubKey without internal and with private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# P2SH address
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
transaction = self.nodes[1].gettransaction(transactionid)
self.log.info("Should import a p2sh")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
assert_equal(address_assert['isscript'], True)
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['timestamp'], timestamp)
p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], False)
# P2SH + Redeem script
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
transaction = self.nodes[1].gettransaction(transactionid)
self.log.info("Should import a p2sh with respective redeem script")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
"redeemscript": multi_sig_script['redeemScript']
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
assert_equal(address_assert['timestamp'], timestamp)
p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + !Watchonly
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
transaction = self.nodes[1].gettransaction(transactionid)
self.log.info("Should import a p2sh with respective redeem script and private keys")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
"redeemscript": multi_sig_script['redeemScript'],
"keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])]
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
assert_equal(address_assert['timestamp'], timestamp)
p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + Watchonly
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
transaction = self.nodes[1].gettransaction(transactionid)
self.log.info("Should import a p2sh with respective redeem script and private keys")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
"redeemscript": multi_sig_script['redeemScript'],
"keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])],
"watchonly": True
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys')
# Address + Public key + !Internal + Wrong pubkey
self.log.info("Should not import an address with a wrong public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"pubkeys": [ address2['pubkey'] ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# ScriptPubKey + Public key + internal + Wrong pubkey
self.log.info("Should not import a scriptPubKey with internal and with a wrong public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
request = [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"pubkeys": [ address2['pubkey'] ],
"internal": True
}]
result = self.nodes[1].importmulti(request)
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Address + Private key + !watchonly + Wrong private key
self.log.info("Should not import an address with a wrong private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address2['address']) ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# ScriptPubKey + Private key + internal + Wrong private key
self.log.info("Should not import a scriptPubKey with internal and with a wrong private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address2['address']) ],
"internal": True
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Importing existing watch only address with new timestamp should replace saved timestamp.
assert_greater_than(timestamp, watchonly_timestamp)
self.log.info("Should replace previously saved watch only timestamp.")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": watchonly_address,
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(watchonly_address)
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
watchonly_timestamp = timestamp
# restart nodes to check for proper serialization/deserialization of watch only address
self.stop_nodes()
self.start_nodes()
address_assert = self.nodes[1].validateaddress(watchonly_address)
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], watchonly_timestamp)
# Bad or missing timestamps
self.log.info("Should throw on invalid or missing timestamp values")
assert_raises_message(JSONRPCException, 'Missing required timestamp field for key',
self.nodes[1].importmulti, [{
"scriptPubKey": | |
True if group['recv'] else False})
continue
# Enabled: Interface config
m = p4.match(line)
if m:
group = m.groupdict()
interface_dict.update({'enabled': group['enabled']})
continue
# Hello interval: 5000 ms; Transport IP addr: 10.169.197.254
m = p5.match(line)
if m:
group = m.groupdict()
interface_dict.update({'hello_interval_ms': int(group['hello_interval_ms'])})
interface_dict.update({'transport_ip_addr': group['transport_ip_address']})
continue
# LDP Id: 10.169.197.252:0
m = p6.match(line)
if m:
group = m.groupdict()
ldp_tdp = group['ldp_tdp'].lower()
if discovery_flag:
ldp_dict = interface_dict.setdefault('{}_id'.format(ldp_tdp), {}).setdefault(
group['ldp_tdp_id'], {})
if targeted_flag:
if targeted_dict:
targeted_dict.update({'{}_id'.format(ldp_tdp): group['ldp_tdp_id']})
continue
# Src IP addr: 10.169.197.93; Transport IP addr: 10.169.197.252
m = p7.match(line)
if m:
group = m.groupdict()
ldp_dict.update({k: v for k, v in group.items() if v})
continue
# Hold time: 15 sec; Proposed local/peer: 15/15 sec
m = p8.match(line)
if m:
group = m.groupdict()
ldp_dict.update({k: int(v) for k, v in group.items() if v})
continue
# Reachable via 10.169.197.252/32
m = p9.match(line)
if m:
group = m.groupdict()
ldp_dict.update({'reachable_via': group['reachable_via']})
continue
# Password: not required, none, in use
m = p10.match(line)
if m:
group = m.groupdict()
ldp_dict.update({'password': group['password']})
continue
# Clients: IPv4, mLDP
m = p11.match(line)
if m:
group = m.groupdict()
ldp_dict.update({'clients': group['clients']})
continue
# Targeted Hellos:
m = p13.match(line)
if m:
discovery_flag = False
targeted_flag = True
continue
# 10.81.1.1 -> 172.16.94.33 (ldp): active, xmit/recv
# 10.81.1.1 -> 172.16.25.16 (tdp): passive, xmit/recv
# 10.131.191.252 -> 10.131.159.251 (ldp): active, xmit
# 10.131.191.252 -> 10.131.159.252 (ldp): active/passive, xmit/recv
m = p12.match(line)
if m:
group = m.groupdict()
targeted_dict = local_ldp_identifier_dict.setdefault('targeted_hellos', {}). \
setdefault(group['source'], {}). \
setdefault(group['destination'], {})
targeted_dict.update({'session': group['session'].lower()})
targeted_dict.update({'xmit': True if group['xmit'] else False})
targeted_dict.update({'recv': True if group['recv'] else False})
targeted_dict.update({'active': True if group['status'] == 'active' else False})
continue
return result_dict
# ================================================
# Show mpls ldp igp sync
# ================================================
class ShowMplsLdpIgpSyncSchema(MetaParser):
    """
    Schema for show mpls ldp igp sync
               show mpls ldp igp sync all
               show mpls ldp igp sync interface <interface>
               show mpls ldp igp sync vrf <vrf>
    """
    # Genie schema: Any() matches an arbitrary key (VRF or interface name),
    # Optional() marks keys that may be absent from the parsed output.
    schema = {
        'vrf': {
            Any(): {  # VRF name; parser uses 'default' when none is given
                'interface': {
                    Any(): {  # interface name, e.g. 'GigabitEthernet0/0/0'
                        # LDP configuration state of the interface
                        'ldp': {
                            'configured': bool,
                            'igp_synchronization_enabled': bool,
                        },
                        # LDP-IGP sync status, only present when reported
                        Optional('sync'): {
                            'status': {
                                Optional('enabled'): bool,
                                'sync_achieved': bool,
                                'peer_reachable': bool,
                            },
                            # "Sync delay time: X seconds (Y seconds left)"
                            Optional('delay_time'): int,
                            Optional('left_time'): int,
                        },
                        # IGP holddown/enable info; values kept as strings
                        # because holddown may be 'infinite'
                        Optional('igp'): {
                            'holddown_time': str,
                            'enabled': str
                        },
                        # LDP identifier of the peer, e.g. '10.169.197.252:0'
                        Optional('peer_ldp_ident'): str,
                    },
                },
            },
        }
    }
class ShowMplsLdpIgpSync(ShowMplsLdpIgpSyncSchema):
    """
    Parser for show mpls ldp igp sync
               show mpls ldp igp sync all
               show mpls ldp igp sync interface <interface>
               show mpls ldp igp sync vrf <vrf>
    """

    cli_command = ['show mpls ldp igp sync',
                   'show mpls ldp igp sync {all}',
                   'show mpls ldp igp sync vrf {vrf}']

    # NOTE: 'all' shadows the builtin, but the name is part of the public
    # parser interface (callers pass it as a keyword), so it is kept.
    def cli(self, vrf="", all="", interface="", output=None):
        """Parse 'show mpls ldp igp sync' output into the schema dict.

        :param vrf: VRF name; selects the 'vrf' command variant
        :param all: truthy to run the 'all' variant
        :param interface: interface name for the 'interface' variant
        :param output: pre-collected device output; when None the chosen
            command is executed on the device
        :returns: dict matching ShowMplsLdpIgpSyncSchema (empty when
            nothing matched)
        """
        if output is None:
            # Select the command variant: 'vrf' wins outright; otherwise
            # 'all'/'interface' (interface takes precedence when both are
            # given), falling back to the plain command.
            if vrf:
                cmd = self.cli_command[3].format(vrf=vrf)
            else:
                if all:
                    cmd = self.cli_command[1].format(all=all)
                if interface:
                    cmd = self.cli_command[2].format(interface=interface)
                if not interface and not all:
                    cmd = self.cli_command[0]
            out = self.device.execute(cmd)
        else:
            out = output

        if not vrf:
            vrf = "default"

        # initial return dictionary
        result_dict = {}

        # GigabitEthernet0/0/0:
        p1 = re.compile(r'^(?P<interface>\S+):$')

        # LDP configured; LDP-IGP Synchronization enabled.
        # LDP configured; LDP-IGP Synchronization not enabled.
        # LDP configured; SYNC enabled.
        # LDP not configured; LDP-IGP Synchronization enabled.
        # (raw-string continuation: the original second literal was not raw,
        # which triggers invalid-escape DeprecationWarnings on Python 3.6+)
        p2 = re.compile(r'^LDP +(?P<configured>[\w\s]+); +(LDP\-IGP +Synchronization '
                        r'+(?P<state>[\w\s]+))?(SYNC +(?P<sync_enabled>[\w\s]+))?.$')

        # Sync status: sync achieved; peer reachable.
        # Sync status: sync not achieved; peer reachable.
        # Sync status: sync not achieved; peer not reachable.
        p3 = re.compile(r'^(Sync|SYNC) +status: +sync +(?P<sync_status>[\w\s]+); +peer +(?P<reachable>[\w\s]+).$')

        # Sync delay time: 0 seconds (0 seconds left)
        p4 = re.compile(r'^Sync +delay +time: +(?P<delay_time>\d+) +seconds \((?P<left_time>\d+) +seconds +left\)$')

        # IGP holddown time: infinite.
        # IGP holddown time: 1 milliseconds.
        p5 = re.compile(r'^IGP +holddown +time: +(?P<holddown_time>[\w\s]+).?$')

        # Peer LDP Ident: 10.169.197.252:0
        p6 = re.compile(r'^Peer +LDP +Ident: +(?P<peer_ldp_ident>\S+).?$')

        # IGP enabled: OSPF 65109
        p7 = re.compile(r'^IGP +enabled: +(?P<igp_enabled>[\S\s]+)$')

        for line in out.splitlines():
            line = line.strip()

            # GigabitEthernet0/0/0:
            m = p1.match(line)
            if m:
                group = m.groupdict()
                interface_dict = result_dict.setdefault('vrf', {}). \
                    setdefault(vrf, {}). \
                    setdefault('interface', {}). \
                    setdefault(group['interface'], {})
                continue

            # LDP configured; LDP-IGP Synchronization enabled.
            # LDP configured; LDP-IGP Synchronization not enabled.
            # LDP configured; SYNC enabled.
            # LDP not configured; LDP-IGP Synchronization enabled.
            m = p2.match(line)
            if m:
                group = m.groupdict()
                ldp_dict = interface_dict.setdefault('ldp', {})
                configured = group['configured']
                state = group['state']
                sync_enabled = group['sync_enabled']
                ldp_dict.update({'configured': True if configured == 'configured' else False})
                if state and state == 'enabled':
                    ldp_dict.update({'igp_synchronization_enabled': True})
                else:
                    ldp_dict.update({'igp_synchronization_enabled': False})
                if sync_enabled:
                    sync_status_dict = interface_dict.setdefault('sync', {}).setdefault('status', {})
                    sync_status_dict.update({'enabled': True if sync_enabled == 'enabled' else False})
                continue

            # Sync status: sync achieved; peer reachable.
            m = p3.match(line)
            if m:
                sync_dict = interface_dict.setdefault('sync', {})
                sync_status_dict = sync_dict.setdefault('status', {})
                sync_status = m.groupdict()['sync_status']
                reachable = m.groupdict()['reachable']
                sync_status_dict.update({'sync_achieved': True if sync_status == 'achieved' else False})
                sync_status_dict.update({'peer_reachable': True if reachable == 'reachable' else False})
                continue

            # Sync delay time: 0 seconds (0 seconds left)
            m = p4.match(line)
            if m:
                group = m.groupdict()
                # Anchor on the interface's 'sync' dict directly: the
                # original relied on a 'Sync status' line having matched
                # first and raised UnboundLocalError otherwise.
                sync_dict = interface_dict.setdefault('sync', {})
                sync_dict.update({'delay_time': int(group['delay_time'])})
                sync_dict.update({'left_time': int(group['left_time'])})
                continue

            # IGP holddown time: infinite.
            m = p5.match(line)
            if m:
                group = m.groupdict()
                igp_dict = interface_dict.setdefault('igp', {})
                igp_dict.update({'holddown_time': group['holddown_time']})
                continue

            # Peer LDP Ident: 10.169.197.252:0
            m = p6.match(line)
            if m:
                group = m.groupdict()
                interface_dict.update({'peer_ldp_ident': group['peer_ldp_ident']})
                continue

            # IGP enabled: OSPF 65109
            m = p7.match(line)
            if m:
                group = m.groupdict()
                # Same defensive anchoring as above: do not assume the
                # 'IGP holddown time' line was seen first.
                igp_dict = interface_dict.setdefault('igp', {})
                igp_dict.update({'enabled': group['igp_enabled'].lower()})
                continue

        return result_dict
class ShowMplsForwardingTableSchema(MetaParser):
    """
    Schema for
        show mpls forwarding-table
        show mpls forwarding-table {prefix}
        show mpls forwarding-table vrf <vrf>
        show mpls forwarding-table detail
        show mpls forwarding-table interface tunnel <tunnelid>
        show mpls forwarding-table vrf <vrf> detail
        show mpls forwarding-table <prefix> <mask> algo <algo>
    """
    # Genie schema: Any() matches an arbitrary key, Optional() marks keys
    # that may be absent. Nesting mirrors the forwarding-table columns:
    # vrf -> local label -> outgoing label/VC -> prefix or tunnel id ->
    # outgoing interface -> per-entry details.
    schema = {
        'vrf':{
            Any(): {  # VRF name; 'default' when none given
                'local_label': {
                    Any(): {  # local label value (or 'None')
                        'outgoing_label_or_vc':{
                            Any():{  # outgoing label, 'Pop Label', 'No Label', ...
                                'prefix_or_tunnel_id':{
                                    Any(): {  # destination prefix or tunnel id
                                        Optional('outgoing_interface'):{
                                            Any():{  # interface name
                                                Optional('bytes_label_switched'): int,
                                                Optional('next_hop'): str,
                                                # [T] / [M] flags on the entry
                                                Optional('tsp_tunnel'): bool,
                                                Optional('merged'): bool,
                                                # detail output: MAC/Encaps line
                                                Optional('mac'): int,
                                                Optional('macstr'): str,
                                                Optional('lstack'): str,
                                                Optional('via'): str,
                                                Optional('encaps'): int,
                                                Optional('mru'): int,
                                                Optional('label_stack'): str,
                                                Optional('vpn_route'): str,
                                                Optional('output_feature_configured'): bool,
                                                # per-destination / per-packet info
                                                Optional('load_sharing'): {
                                                    'method': str,
                                                    Optional('slots'): list,
                                                },
                                                Optional('broadcast'): bool,
                                                # flex-algo tuple (pdb:metric:algo:via_srms)
                                                Optional('flexalgo_info'): {
                                                    'pdb_index': int,
                                                    'metric': int,
                                                    'algo': int,
                                                    'via_srms': int,
                                                },
                                            }
                                        }
                                    },
                                }
                            }
                        }
                    }
                }
            }
        }
    }
class ShowMplsForwardingTable(ShowMplsForwardingTableSchema):
"""
Parser for
show mpls forwarding-table
show mpls forwarding-table {prefix}
show mpls forwarding-table vrf {vrf}
show mpls forwarding-table interface tunnel <tunnelid>
show mpls forwarding-table <prefix> <mask> algo <algo>
"""
cli_command = ['show mpls forwarding-table vrf {vrf}',
'show mpls forwarding-table {prefix}',
'show mpls forwarding-table',
'show mpls forwarding-table interface tunnel {tunnelid}',
'show mpls forwarding-table {prefix} {mask} algo {algo}',]
def cli(self, vrf="", prefix="",tunnelid="", mask="", algo="", output=None):
if output is None:
if vrf:
cmd = self.cli_command[0].format(vrf=vrf)
elif prefix:
cmd = self.cli_command[1].format(prefix=prefix)
elif tunnelid:
cmd = self.cli_command[3].format(tunnelid=tunnelid)
elif prefix and mask and algo:
cmd = self.cli_command[4].format(prefix=prefix, mask=mask, algo=algo)
else:
cmd = self.cli_command[2]
out = self.device.execute(cmd)
else:
out = output
if not vrf:
vrf = 'default'
# initial return dictionary
result_dict = {}
# Local Outgoing Prefix Bytes Label Outgoing Next Hop
# Label Label or Tunnel Id Switched interface
# 9301 No Label 172.16.100.1/32[V] \
# 0 Po1.51 192.168.10.253
# No Label 10.23.120.0/24[V] \
# None No Label 10.0.0.16/30 0 Gi3 10.0.0.9
# 39 [M] 16052 10.169.14.241/32 \
# 235 211 10.55.0.0/24 0 Te0/2/0.102 192.168.4.1
# 37 142409 172.16.100.1/32 \
# 0 Gi0/1/6 192.168.10.253
# 107829 172.16.100.1/32 \
# 0 Gi0/1/7 192.168.10.254
# 25 16021 0-23.23.23.23/32-4 (10:30:130:1) \
# 0 Et0/2 172.16.58.3
p1 = re.compile(r'^((?P<local_label>\d+|[Nn]one) +)?(?:\[(?P<info_tag>(?:T|M)+)\] +)?'
r'(?P<outgoing_label>(\w+|(No|Pop) +Label)) +(?P<prefix_or_tunnel_id>[\S]+) '
r'+\(?(?P<flexalgo_info>\d+:\d+:\d+:\d+)?\)?'
r'(?P<bytes_label_switched>\d*)( +(?P<interface>\S+))?( +(?P<next_hop>[\w\.]+))?$')
# [T] 16130 10.25.40.40/32 0 Tu1 point2point
# 22 [M] Pop Label 192.168.0.1/32 0 Gi2 192.168.0.2
# 22 [T] Pop Label 1/1[TE-Bind] 0 Tu1 point2point
p2_1 = re.compile(r'^(?:(?P<local_label>\w+) +)?(?:\[(?P<info_tag>(?:T|M)+)\] +)?'
r'(?P<outgoing_label>(?:(?:A|a)ggregate|Untagged|(?:No|Pop) '
r'Label|(?:No|Pop) (?:T|t)ag|\d\/\w*|\d|\d\/)+)(?:\['
r'(?P<t1>(T)+)\] +)? +(?P<prefix_or_tunnel_id>[\w\(\)\:|\S]+) '
r'+\(?(?P<flexalgo_info>\d+:\d+:\d+:\d+)?\)?'
r' +(?P<bytes_label_switched>\d*)(?: +(?P<interface>\S+))?(?: +'
r'(?P<next_hop>[\w\.]+))?$')
# 22 [T] Pop Label 1/1[TE-Bind] 0 Tu1 point2point
p2_2 = re.compile(r'^((?P<local_label>\w+) +)?(\[(?P<info_tag>(T)+)\] +)?'
r'(?P<outgoing_label>((A|a)ggregate|(No|Pop) Label|(No|Pop) tag|\d|\d\/)+)?'
r'(\[(?P<t1>(T)+)\] +)? +(?P<prefix_or_tunnel_id>[\w\.\[\]\-\s]+) '
r'+\(?(?P<flexalgo_info>\d+:\d+:\d+:\d+)?\)?'
r' +(?P<bytes_label_switched>\d+)( +(?P<interface>\S+))?( +(?P<next_hop>[\w\.]+))?$')
# MAC/Encaps=18/18, MRU=1530, Label Stack{}
# MAC/Encaps=18/18, MRU=1530, Label Stack{}, via Ls0
# MAC/Encaps=14/26, MRU=1492, Label Stack{16052 16062 16063}, via Gi0/1/7
p3 = re.compile(r'^MAC\/Encaps=(?P<mac>\d+)\/(?P<encaps>\d+), +MRU=(?P<mru>[\d]+), '
'+Label +Stack{(?P<label_stack>.*)}(, via +(?P<via>\S+))?$')
# 00002440156384B261CB1480810000330800
# AABBCC032800AABBCC0325018847 00010000
# 0050568DA282BC16652F3A178847 03EB400003EBE00003EBF000
p4 = re.compile(r'^(?P<code>[0-9A-F]+)( +(?P<lstack>\w+))?$')
# VPN route: L3VPN-0051
p5 = re.compile(r'^VPN +route: +(?P<vpn_route>\S+)$')
# No output feature configured
p6 = re.compile(r'^No +output +feature +configured$')
# Per-destination load-sharing, slots: 0 2 | |
POST /users/{username}/phonenumbers
"""
uri = self.client.base_url + "/users/" + username + "/phonenumbers"
resp = self.client.post(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 201:
return APIResponse(data=Phonenumber(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def DeletePublicKey(self, label, username, headers=None, query_params=None, content_type="application/json"):
"""
Delete a public key
It is method for DELETE /users/{username}/publickeys/{label}
"""
uri = self.client.base_url + "/users/" + username + "/publickeys/" + label
return self.client.delete(uri, None, headers, query_params, content_type)
def GetPublicKey(self, label, username, headers=None, query_params=None, content_type="application/json"):
"""
Get a public key
It is method for GET /users/{username}/publickeys/{label}
"""
uri = self.client.base_url + "/users/" + username + "/publickeys/" + label
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=PublicKey(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def UpdatePublicKey(self, data, label, username, headers=None, query_params=None, content_type="application/json"):
"""
Upates the label and/or key of an existing public key
It is method for PUT /users/{username}/publickeys/{label}
"""
uri = self.client.base_url + "/users/" + username + "/publickeys/" + label
resp = self.client.put(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 201:
return APIResponse(data=PublicKey(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def ListPublicKeys(self, username, headers=None, query_params=None, content_type="application/json"):
"""
Lists all public keys
It is method for GET /users/{username}/publickeys
"""
uri = self.client.base_url + "/users/" + username + "/publickeys"
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
resps = []
for elem in resp.json():
resps.append(PublicKey(elem))
return APIResponse(data=resps, response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def AddPublicKey(self, data, username, headers=None, query_params=None, content_type="application/json"):
"""
Add a public key
It is method for POST /users/{username}/publickeys
"""
uri = self.client.base_url + "/users/" + username + "/publickeys"
resp = self.client.post(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 201:
return APIResponse(data=PublicKey(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def DeleteUserRegistryEntry(self, key, username, headers=None, query_params=None, content_type="application/json"):
"""
Removes a RegistryEntry from the user's registry
It is method for DELETE /users/{username}/registry/{key}
"""
uri = self.client.base_url + "/users/" + username + "/registry/" + key
return self.client.delete(uri, None, headers, query_params, content_type)
def GetUserRegistryEntry(self, key, username, headers=None, query_params=None, content_type="application/json"):
"""
Get a RegistryEntry from the user's registry.
It is method for GET /users/{username}/registry/{key}
"""
uri = self.client.base_url + "/users/" + username + "/registry/" + key
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=RegistryEntry(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def ListUserRegistry(self, username, headers=None, query_params=None, content_type="application/json"):
"""
Lists the Registry entries
It is method for GET /users/{username}/registry
"""
uri = self.client.base_url + "/users/" + username + "/registry"
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
resps = []
for elem in resp.json():
resps.append(RegistryEntry(elem))
return APIResponse(data=resps, response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def AddUserRegistryEntry(self, data, username, headers=None, query_params=None, content_type="application/json"):
"""
Adds a RegistryEntry to the user's registry, if the key is already used, it is overwritten.
It is method for POST /users/{username}/registry
"""
uri = self.client.base_url + "/users/" + username + "/registry"
resp = self.client.post(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 201:
return APIResponse(data=RegistryEntry(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def SignSeeObject(
self,
data,
version,
uniqueid,
globalid,
username,
headers=None,
query_params=None,
content_type="application/json",
):
"""
Sign a see object
It is method for PUT /users/{username}/see/{uniqueid}/{globalid}/sign/{version}
"""
uri = self.client.base_url + "/users/" + username + "/see/" + uniqueid + "/" + globalid + "/sign/" + version
resp = self.client.put(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 201:
return APIResponse(data=SeeView(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def GetSeeObject(
self, uniqueid, globalid, username, headers=None, query_params=None, content_type="application/json"
):
"""
Get a see object
It is method for GET /users/{username}/see/{uniqueid}/{globalid}
"""
uri = self.client.base_url + "/users/" + username + "/see/" + uniqueid + "/" + globalid
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=See(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def UpdateSeeObject(
self, data, uniqueid, globalid, username, headers=None, query_params=None, content_type="application/json"
):
"""
Updates a see object
It is method for PUT /users/{username}/see/{uniqueid}/{globalid}
"""
uri = self.client.base_url + "/users/" + username + "/see/" + uniqueid + "/" + globalid
resp = self.client.put(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 201:
return APIResponse(data=SeeView(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def GetSeeObjects(self, username, headers=None, query_params=None, content_type="application/json"):
"""
Get a list of all see objects.
It is method for GET /users/{username}/see
"""
uri = self.client.base_url + "/users/" + username + "/see"
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
resps = []
for elem in resp.json():
resps.append(SeeView(elem))
return APIResponse(data=resps, response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def CreateSeeObject(self, data, username, headers=None, query_params=None, content_type="application/json"):
"""
Create new see object
It is method for POST /users/{username}/see
"""
uri = self.client.base_url + "/users/" + username + "/see"
resp = self.client.post(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 201:
return APIResponse(data=SeeView(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def RemoveTOTP(self, username, headers=None, query_params=None, content_type="application/json"):
"""
Disable TOTP two-factor authentication.
It is method for DELETE /users/{username}/totp
"""
uri = self.client.base_url + "/users/" + username + "/totp"
return self.client.delete(uri, None, headers, query_params, content_type)
def GetTOTPSecret(self, username, headers=None, query_params=None, content_type="application/json"):
"""
Get a TOTP secret and issuer that can be used for setting up two-factor authentication.
It is method for GET /users/{username}/totp
"""
uri = self.client.base_url + "/users/" + username + "/totp"
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=TOTPSecret(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def SetupTOTP(self, data, username, headers=None, query_params=None, content_type="application/json"):
"""
Enable two-factor authentication using TOTP.
It is method for POST /users/{username}/totp
"""
uri = self.client.base_url + "/users/" + username + "/totp"
return self.client.post(uri, data, headers, query_params, content_type)
def GetTwoFAMethods(self, username, headers=None, query_params=None, content_type="application/json"):
"""
Get the possible two-factor authentication methods
It is method for GET /users/{username}/twofamethods
"""
uri = self.client.base_url + "/users/" + username + "/twofamethods"
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=TwoFAMethods(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
| |
can not exceed 50. (see [Pagination](https://developer.toornament.com/v2/overview/pagination))
:param id: The string id of the tournament.
:param name: The string to be looked for in the name or full name.
:param disciplines: One or several disciplines to filter.
:param statuses: One or several tournament statuses to filter.
:param scheduled_before: A date to include all tournaments scheduled to take place before or at the date, in ISO 8601 format (only the date part, with YYYY-MM-DD pattern).
:param scheduled_after: A date to include all tournaments scheduled to take place after or at the date, in ISO 8601 format (only the date part, with YYYY-MM-DD pattern).
:param countries: One or several countries to filter in ISO 3166-1 alpha-2 country codes format (some codes may not be supported)
:param platforms: One or several platforms to filter.
:param is_online: Whether the tournament is played online.
:param sort: Sorts the collection in a particular order. "scheduled_asc" sorts the tournaments by scheduled date from the oldest to the most recent one; "scheduled_desc" sorts the tournaments by scheduled date from the most recent to the oldest one."""
id = str(id)
method = 'GET'
path = '/playlists/{id}/tournaments'
path_mapping = {
'id': id,
}
query_parameters = {
}
if name:
query_parameters['name'] = name
if disciplines:
query_parameters['disciplines'] = disciplines
if statuses:
query_parameters['statuses'] = statuses
if scheduled_before:
query_parameters['scheduled_before'] = scheduled_before
if scheduled_after:
query_parameters['scheduled_after'] = scheduled_after
if countries:
query_parameters['countries'] = countries
if platforms:
query_parameters['platforms'] = platforms
if is_online:
query_parameters['is_online'] = is_online
if sort:
query_parameters['sort'] = sort
if not range.unit:
range.unit = 'tournaments'
headers = {
'Range': range.get_header_value(),
}
content = self._simple_access(method, path, path_parameters = path_mapping, query_parameters = query_parameters,
headers = headers)
return [Tournament(**tour) for tour in content]
class AsyncViewerAPI(AsyncToornamentConnection):
@staticmethod
def _base_url():
    # Root endpoint for the public Toornament "viewer" API, version 2.
    return 'https://api.toornament.com/viewer/v2'
async def get_match(self, tournament_id, id):
    """Retrieve a single match of a tournament.

    Returns a match with all its games and opponents. In ffa matches only
    the first four opponents are included in each match game.

    :param tournament_id: The id of the tournament you want to retrieve data about.
    :param id: The id of the match to retrieve."""
    # Both path parameters are coerced to strings before substitution.
    path_mapping = {
        'tournament_id': str(tournament_id),
        'id': str(id),
    }
    content = await self._simple_access(
        'GET',
        '/tournaments/{tournament_id}/matches/{id}',
        path_parameters=path_mapping,
        query_parameters={},
        headers={},
    )
    return MatchDetailed(**content)
async def get_matches_from_tournament(self, tournament_id, *, range: Range, stage_ids: Optional[list] = None,
                                      stage_numbers: Optional[list] = None, group_ids: Optional[list] = None,
                                      group_numbers: Optional[list] = None, round_ids: Optional[list] = None,
                                      round_numbers: Optional[list] = None, statuses: Optional[list] = None,
                                      is_scheduled: Optional[bool] = None, scheduled_before: Optional[str] = None,
                                      scheduled_after: Optional[str] = None, participant_ids: Optional[list] = None,
                                      sort: Optional[str] = None):
    """Retrieve matches of a tournament.
    Returns the matches of a tournament. In ffa matches only the first four opponents are included in each match.
    :param range: A range of requested items using the 'matches' unit. The size of the range can not exceed 128. (see [Pagination](https://developer.toornament.com/v2/overview/pagination))
    :param tournament_id: The id of the tournament you want to retrieve data about.
    :param stage_ids: One or several stage ids to filter.
    :param stage_numbers: One or several stage numbers to filter.
    :param group_ids: One or several group ids to filter.
    :param group_numbers: One or several group numbers to filter.
    :param round_ids: One or several round ids to filter.
    :param round_numbers: One or several round numbers to filter.
    :param statuses: One or several match statuses to filter.
    :param is_scheduled: Whether to include scheduled matches.
    :param scheduled_before: A datetime in RFC 3339 format (combined date, time and utc offset), to include all matches scheduled before or at the datetime.
    :param scheduled_after: A datetime in RFC 3339 format (combined date, time and utc offset), to include all matches scheduled after or at the datetime
    :param participant_ids: One or several participant ids involved in the matches to filter.
    :param sort: A method to sort the filtered data. "structure" sorts using the stage, group, round and match numbers. "schedule" sorts using the scheduled date. "latest results" sorts using the date at which the matches were played (not scheduled)."""
    tournament_id = str(tournament_id)
    # Id-valued filters are normalised to lists of strings.
    stage_ids = [str(e) for e in stage_ids] if stage_ids else stage_ids
    group_ids = [str(e) for e in group_ids] if group_ids else group_ids
    round_ids = [str(e) for e in round_ids] if round_ids else round_ids
    participant_ids = [str(e) for e in participant_ids] if participant_ids else participant_ids
    method = 'GET'
    path = '/tournaments/{tournament_id}/matches'
    path_mapping = {
        'tournament_id': tournament_id,
    }
    query_parameters = {}
    if stage_ids:
        query_parameters['stage_ids'] = stage_ids
    if stage_numbers:
        query_parameters['stage_numbers'] = stage_numbers
    if group_ids:
        query_parameters['group_ids'] = group_ids
    if group_numbers:
        query_parameters['group_numbers'] = group_numbers
    if round_ids:
        query_parameters['round_ids'] = round_ids
    if round_numbers:
        query_parameters['round_numbers'] = round_numbers
    if statuses:
        query_parameters['statuses'] = statuses
    # BUG FIX: this previously read `if is_scheduled:`, which silently
    # dropped the filter when the caller passed False, making it impossible
    # to request *unscheduled* matches. Compare against None so both True
    # and False are forwarded.
    if is_scheduled is not None:
        query_parameters['is_scheduled'] = is_scheduled
    if scheduled_before:
        query_parameters['scheduled_before'] = scheduled_before
    if scheduled_after:
        query_parameters['scheduled_after'] = scheduled_after
    if participant_ids:
        query_parameters['participant_ids'] = participant_ids
    if sort:
        query_parameters['sort'] = sort
    # Default the pagination unit when the caller did not set one.
    if not range.unit:
        range.unit = 'matches'
    headers = {
        'Range': range.get_header_value(),
    }
    content = await self._simple_access(method, path, path_parameters=path_mapping,
                                        query_parameters=query_parameters,
                                        headers=headers)
    return [Match(**match) for match in content]
async def get_matches_from_discipline(self, discipline_id, *, range: Range, is_featured: Optional[bool] = None,
                                      statuses: Optional[list] = None, scheduled_before: Optional[str] = None,
                                      scheduled_after: Optional[str] = None, participant_ids: Optional[list] = None,
                                      tournament_ids: Optional[list] = None, sort: Optional[str] = None):
    """Retrieve matches of a discipline, regardless of their tournament.
    Returns matches of a discipline. In ffa matches only the first four opponents are included in each match game.
    :param range: A range of requested items using the 'matches' unit. The size of the range can not exceed 128. (see [Pagination](https://developer.toornament.com/v2/overview/pagination))
    :param discipline_id: The string id of the discipline.
    :param is_featured: Whether to include featured tournaments.
    :param statuses: One or several match statuses to filter.
    :param scheduled_before: A datetime in RFC 3339 format (combined date, time and utc offset), to include all matches scheduled before or at the datetime.
    :param scheduled_after: A datetime in RFC 3339 format (combined date, time and utc offset), to include all matches scheduled after or at the datetime
    :param participant_ids: One or several participant ids involved in the matches to filter.
    :param tournament_ids: List of tournament IDs to filter the data with.
    :param sort: A method to sort the filtered data. "structure" sorts using the stage, group, round and match numbers. "schedule" sorts using the scheduled date. "latest results" sorts using the date at which the matches were played (not scheduled)."""
    discipline_id = str(discipline_id)
    # Id-valued filters are normalised to lists of strings.
    participant_ids = [str(e) for e in participant_ids] if participant_ids else participant_ids
    tournament_ids = [str(e) for e in tournament_ids] if tournament_ids else tournament_ids
    method = 'GET'
    path = '/disciplines/{discipline_id}/matches'
    path_mapping = {
        'discipline_id': discipline_id,
    }
    query_parameters = {}
    # BUG FIX: this previously read `if is_featured:`, which silently
    # dropped the filter when the caller passed False, making it impossible
    # to filter for non-featured tournaments. Compare against None so both
    # True and False are forwarded.
    if is_featured is not None:
        query_parameters['is_featured'] = is_featured
    if statuses:
        query_parameters['statuses'] = statuses
    if scheduled_before:
        query_parameters['scheduled_before'] = scheduled_before
    if scheduled_after:
        query_parameters['scheduled_after'] = scheduled_after
    if participant_ids:
        query_parameters['participant_ids'] = participant_ids
    if tournament_ids:
        query_parameters['tournament_ids'] = tournament_ids
    if sort:
        query_parameters['sort'] = sort
    # Default the pagination unit when the caller did not set one.
    if not range.unit:
        range.unit = 'matches'
    headers = {
        'Range': range.get_header_value(),
    }
    content = await self._simple_access(method, path, path_parameters=path_mapping,
                                        query_parameters=query_parameters,
                                        headers=headers)
    return [MatchDiscipline(**match) for match in content]
async def get_bracket_nodes(self, tournament_id, stage_id, *, range: Range, group_ids: Optional[list] = None,
group_numbers: Optional[list] = None, round_ids: Optional[list] = None,
round_numbers: Optional[list] = None, min_depth: Optional[int] = None,
max_depth: Optional[int] = None):
"""Retrieve bracket nodes of a stage and tournament.
Returns the bracket nodes of a stage. A bracket node represents a match and some extra data.
:param range: A range of requested items using the 'nodes' unit. The size of the range can not exceed 128. (see [Pagination](https://developer.toornament.com/v2/overview/pagination))
:param tournament_id: The id of the tournament you want to retrieve data about.
:param stage_id: The id of the stage you want to retrieve data about.
:param group_ids: A list of group ids to filter.
:param group_numbers: A list of group numbers to filter.
:param round_ids: A list of round ids to filter.
:param round_numbers: | |
'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = | |
# im_process_GUI.py
import base64
from tkinter import *
from tkinter import ttk
# from tkinter.filedialog import askopenfilename
from PIL import ImageTk, Image
from tkinter import filedialog
import requests
from pymodm import connect
import cv2
import zipfile
import numpy as np
import io
from imageio import imread, imwrite
import matplotlib
matplotlib.use('TkAgg')
# matplotlib.use('Agg')
root = Tk() # makes main window
root.title("GUI Client")
main_frame = Frame(root)
main_frame.pack()
URL = "http://127.0.0.1:5000"
# URL = "http://vcm-9060.vm.duke.edu:5000"
def main():
    """Program entry point: connect to MongoDB, then launch the first GUI screen."""
    init_mongo_db()
    first_screen()
def init_mongo_db():
    """Initializes the connection to the database through mongoDB.

    The connection URI may be overridden with the ``MONGODB_URI``
    environment variable; otherwise the historical default is used,
    preserving the original behavior.
    """
    import os
    # SECURITY: credentials were hard-coded here and committed to version
    # control. They are kept only as a backward-compatible fallback —
    # prefer setting MONGODB_URI, and rotate this password.
    uri = os.environ.get(
        "MONGODB_URI",
        "mongodb+srv://mlw60:Wm347609@bme547-r5nv9."
        "mongodb.net/test?retryWrites=true")
    connect(uri)
def first_screen():
    """First GUI screen: collect a username and the raw image upload(s).

    Presents a username entry box, an upload button that lets the user pick
    a single image, multiple images, or a zip file of images, and a
    Continue button that posts the username to the server and advances to
    the processing-selection screen.
    """
    frame = Frame(root)
    frame.pack()
    prompt = ttk.Label(frame, text="Please enter a username below:")
    prompt.grid(column=0, row=0)
    username = StringVar()
    name_entry = ttk.Entry(frame, textvariable=username, width=20)
    name_entry.grid(column=0, row=1)
    upload_btn = ttk.Button(frame, text='Upload Raw Image(s)',
                            command=lambda: browse_function(frame))
    upload_btn.grid(column=0, row=2)
    continue_btn = ttk.Button(frame, text='Continue',
                              command=lambda: cont_function(username, frame))
    continue_btn.grid(column=1, row=3)
    root.mainloop()  # shows window
def browse_function(first_frame):
    """Creates a dialog box for the user to choose image files from
    their own local computer

    Allows the user to upload their raw image(s) to which they would
    like to perform image processing. The user can choose a single image,
    multiple images, or a zip file of images to upload.

    Args:
        first_frame (tkinter.Frame): frame on which the
            "N file(s) uploaded" label is placed.
    """
    # Selected paths are stored in a module-level global that
    # cont_function/process_image consume later.
    global raw_filenames
    root.filename = \
        filedialog.askopenfilenames(initialdir="/", title="Select Image")
    first_file = root.filename[0]
    if first_file.lower().endswith('zip') is True:
        # Zip upload: record the archive's member names.
        # NOTE(review): namelist() returns paths *inside* the archive, but
        # process_image() later open()s these names as local files without
        # extracting them — this likely fails for zip uploads; verify.
        zf = zipfile.ZipFile(root.filename[0], 'r')
        raw_filenames = zf.namelist()
        num_files = len(raw_filenames)
    else:
        # Plain file selection: keep the tuple of absolute paths.
        raw_filenames = root.filename
        num_files = len(raw_filenames)
    file_label = ttk.Label(first_frame,
                           text="{} file(s) uploaded".format(num_files))
    file_label.grid(column=0, row=3)
def cont_function(username, first_frame):
    """Posts username information to the server and proceeds
    to the next page of the GUI

    Validates that images were selected and that each has a supported
    extension (jpeg/jpg/tiff/tif/png), and that a username was entered,
    before advancing to the second screen.

    Args:
        username (tkinter.StringVar): user-specified username to identify
            each unique user
        first_frame (tkinter.Frame): frame of the first screen that is
            destroyed to move on to the second screen

    Raises:
        KeyError: if no images were selected or no username was entered.
        TypeError: if a selected file has an unsupported extension.
    """
    new_user = {"user_name": username.get()
                }
    requests.post(URL+'/user_name', json=new_user)
    try:
        if len(raw_filenames) == 0:
            raise KeyError("No images selected.")
        else:
            for i in raw_filenames:
                if (i.lower().endswith("jpeg") is False) \
                        and (i.lower().endswith("jpg") is False) \
                        and (i.lower().endswith("tiff") is False) \
                        and (i.lower().endswith("tif") is False) \
                        and (i.lower().endswith("png") is False):
                    raise TypeError("Images are not the right file type.")
    except NameError:
        # raw_filenames was never created (upload button not used).
        # NOTE(review): tkinter discards the return value of a button
        # callback; consider showing a dialog instead of returning a string.
        return "No images selected."
    if not username.get():
        raise KeyError("No username entered")
    second_screen(username, first_frame)
    pass  # NOTE(review): dead statement; safe to delete.
def second_screen(username, first_frame):
    """Second GUI screen: choose the image-processing method.

    Destroys the first screen, then shows four processing options as
    radio buttons whose selection is stored in an IntVar, plus a
    Continue button that triggers process_image().

    Args:
        username (tkinter.StringVar): user-specified username to identify
            each unique user
        first_frame (tkinter.Frame): frame of the first screen, destroyed
            here before this screen is built

    Returns:
        tkinter.Frame: the frame holding this screen's widgets
    """
    first_frame.destroy()
    second_frame = Frame(root)
    second_frame.pack()
    header = ttk.Label(second_frame,
                       text="Select image processing step:",
                       font=("Helvetica", 20))
    header.grid(column=0, row=0)
    process_method = IntVar()
    # (label, IntVar value) pairs; value doubles as the grid row.
    choices = [("Histogram Equalization", 1),
               ("Contrast Stretching", 2),
               ("Log Compression", 3),
               ("Reverse Video", 4)]
    for label_text, choice_value in choices:
        ttk.Radiobutton(second_frame, text=label_text,
                        variable=process_method,
                        value=choice_value).grid(column=0, row=choice_value,
                                                 sticky=W)
    continue_btn = ttk.Button(second_frame, text='Continue',
                              command=lambda:
                              process_image(username, process_method,
                                            second_frame))
    continue_btn.grid(column=0, row=7)
    root.mainloop()  # shows window
    return second_frame
def process_image(username, process_method, second_frame):
    """Encodes the raw images, resolves the chosen processing type,
    and sends everything to the server.

    Each raw file is read in binary mode and base64-encoded to a utf-8
    string. The IntVar selection (1-4) is mapped to a processing-type
    keyword (defaulting to histogram equalization for any other value),
    then the username, encoded images, and processing type are POSTed to
    the server before the third screen is shown.

    Args:
        username (tkinter.StringVar): user-specified username to identify
            each unique user
        process_method (tkinter.IntVar): 1, 2, 3, or 4 for histogram
            equalization, contrast stretching, log compression, or
            reverse video respectively
        second_frame (tkinter.Frame): frame of the second screen that is
            destroyed to move on to the third GUI screen
    """
    raw_b64_strings = []
    for filename in raw_filenames:
        with open(filename, "rb") as raw_image_file:
            encoded = base64.b64encode(raw_image_file.read())
            raw_b64_strings.append(str(encoded, encoding='utf-8'))
    # Dispatch table replaces the if/elif chain; unknown values fall back
    # to histogram equalization, matching the original default branch.
    type_by_choice = {1: 'hist_eq',
                      2: 'con_stretch',
                      3: 'log_comp',
                      4: 'reverse_vid'}
    processing_type = type_by_choice.get(process_method.get(), 'hist_eq')
    user_processing_type = {"user_name": username.get(),
                            "raw_b64_strings": raw_b64_strings,
                            "processing_type": processing_type}
    requests.post(URL+'/processing_type', json=user_processing_type)
    third_screen(username, second_frame)
    return
def third_screen(username, second_frame):
    """The third screen in the GUI

    Replaces the second screen with a results screen: shows the upload
    time, processing time, and pixel dimensions of the most recent raw
    image, offers buttons to view the raw/processed images side by side
    and their RGB histograms, lets the user download the processed images
    as JPEG, PNG, or TIFF, and provides navigation to apply another
    processing method, return to the homepage, or exit.

    Args:
        username (tkinter.StringVar): user-specified username to identify
            each unique user
        second_frame (tkinter.Frame): frame of the second screen that is
            destroyed to move on to the third GUI screen

    Returns:
        tkinter.Frame: frame of the third screen that is
            destroyed to move on to the next GUI screens
    """
    second_frame.destroy()
    frame = Frame(root)
    frame.pack()
    # Metadata labels for the most recent upload.
    uploaded_at, elapsed = get_time_metadata(username)
    ttk.Label(frame, text="Time Uploaded: {}".format(uploaded_at)).grid(
        column=0, row=0, columnspan=2, sticky=W)
    ttk.Label(frame, text="Time to Process: {}".format(elapsed)).grid(
        column=0, row=1, columnspan=2, sticky=W)
    # Read the last raw image only to report its dimensions.
    last_image = cv2.imread(raw_filenames[-1], cv2.IMREAD_UNCHANGED)
    img_height, img_width = last_image.shape[0], last_image.shape[1]
    ttk.Label(frame, text="Image Size: {} x {} pixels".format(
        img_width, img_height)).grid(column=0, row=2, columnspan=2, sticky=W)
    # Viewer buttons.
    view_images_btn = ttk.Button(frame,
                                 text='View Raw and Processed Images',
                                 command=lambda: image_window(username))
    view_images_btn.grid(column=0, row=3)
    view_hists_btn = ttk.Button(frame,
                                text='View Histograms',
                                command=lambda: hist_window(username))
    view_hists_btn.grid(column=0, row=4)
    # Download controls: format selector plus download button.
    download_label = ttk.Label(frame,
                               text="Download processed image as:")
    download_label.grid(column=2, row=0,
                        rowspan=2, columnspan=2, sticky=W)
    image_format = StringVar()
    format_menu = ttk.Combobox(frame, textvariable=image_format)
    format_menu.grid(column=2, row=2, sticky=W)
    format_menu['values'] = ('JPEG', 'PNG', 'TIFF')
    download_btn = ttk.Button(frame, text='Download',
                              command=lambda: download_function
                              (username, image_format, frame))
    download_btn.grid(column=2, row=3, sticky=W)
    # Navigation buttons: reprocess, home, finish.
    reprocess_btn = ttk.Button(frame,
                               text='Apply another Processing Method',
                               command=lambda:
                               reprocess_function(username, frame))
    reprocess_btn.grid(column=0, row=5, sticky=W, pady=20)
    home_btn = ttk.Button(frame,
                          text='Return to Homepage',
                          command=lambda: return_function(frame))
    home_btn.grid(column=1, row=5, sticky=W, pady=20)
    finish_btn = ttk.Button(frame,
                            text='Finish & Exit',
                            command=lambda: finish_function(frame))
    finish_btn.grid(column=2, row=5, sticky=W, pady=20)
    root.mainloop()  # shows window
    return frame
def return_function(third_frame):
    """Returns the user to the first GUI screen

    Tears down the third screen's frame and rebuilds the GUI from the
    first screen so the user can start over with a new upload.

    Args:
        third_frame (tkinter.Frame): frame of the third screen that is
            destroyed to move on to the first screen again
    """
    third_frame.destroy()
    first_screen()
def get_time_metadata(username):
    """Fetch time metadata for the user's most recent upload from the server.

    Args:
        username (tkinter.StringVar): user-specified username to identify
            each unique user

    Returns:
        str: the time that the image(s) were uploaded to the database
        str: the amount of time the server took to process the image(s)
    """
    response = requests.get(URL+'/time_metadata/'+username.get())
    metadata = response.json()
    return metadata['time_uploaded'], metadata['process_time']
def get_processed_image(username):
    """Gets the b64 strings of the processed images from the server and
    decodes them to raw image bytes

    Args:
        username (tkinter.StringVar): user-specified username to identify
            each unique user

    Returns:
        list of bytes: the decoded raw bytes of each processed image
    """
    r = requests.get(URL+'/processed_image/'+username.get())
    proc_b64_strings = r.json()['processed_images']
    # Decode every base64 payload in order; callers rely on the ordering
    # matching the upload order.
    return [base64.b64decode(s) for s in proc_b64_strings]
def display_histogram(fig, ax, img, image_win, img_type):
"""Creates and displays a histogram of pixel intensity
Args:
| |
'''
self._pylock.release()
return
@ivi_synchronized
def wait_for_event(self, event_id, timeout=hightime.timedelta(seconds=10.0)):
r'''wait_for_event
Waits until the specified channel(s) have generated the specified event.
The session monitors whether each type of event has occurred at least
once since the last time this method or the initiate
method were called. If an event has only been generated once and you
call this method successively, the method times out. Individual
events must be generated between separate calls of this method.
Note:
Refer to `Supported Methods by
Device <REPLACE_DRIVER_SPECIFIC_URL_2(nidcpowercref.chm',%20'supportedfunctions)>`__
for more information about supported devices.
Tip:
This method can be called on specific channels within your :py:class:`nidcpower.Session` instance.
Use Python index notation on the repeated capabilities container channels to specify a subset,
and then call this method on the result.
Example: :py:meth:`my_session.channels[ ... ].wait_for_event`
To call the method on all channels, you can call it directly on the :py:class:`nidcpower.Session`.
Example: :py:meth:`my_session.wait_for_event`
Args:
event_id (enums.Event): Specifies which event to wait for.
**Defined Values:**
+-------------------------------------------------+--------------------------------------------------+
| NIDCPOWER_VAL_SOURCE_COMPLETE_EVENT | Waits for the Source Complete event. |
+-------------------------------------------------+--------------------------------------------------+
| NIDCPOWER_VAL_MEASURE_COMPLETE_EVENT | Waits for the Measure Complete event. |
+-------------------------------------------------+--------------------------------------------------+
| NIDCPOWER_VAL_SEQUENCE_ITERATION_COMPLETE_EVENT | Waits for the Sequence Iteration Complete event. |
+-------------------------------------------------+--------------------------------------------------+
| NIDCPOWER_VAL_SEQUENCE_ENGINE_DONE_EVENT | Waits for the Sequence Engine Done event. |
+-------------------------------------------------+--------------------------------------------------+
| NIDCPOWER_VAL_PULSE_COMPLETE_EVENT | Waits for the Pulse Complete event. |
+-------------------------------------------------+--------------------------------------------------+
| NIDCPOWER_VAL_READY_FOR_PULSE_TRIGGER_EVENT | Waits for the Ready for Pulse Trigger event. |
+-------------------------------------------------+--------------------------------------------------+
Note:
One or more of the referenced values are not in the Python API for this driver. Enums that only define values, or represent True/False, have been removed.
timeout (hightime.timedelta, datetime.timedelta, or float in seconds): Specifies the maximum time allowed for this method to complete, in
seconds. If the method does not complete within this time interval,
NI-DCPower returns an error.
Note:
When setting the timeout interval, ensure you take into account any
triggers so that the timeout interval is long enough for your
application.
'''
if type(event_id) is not enums.Event:
raise TypeError('Parameter event_id must be of type ' + str(enums.Event))
vi_ctype = _visatype.ViSession(self._vi) # case S110
channel_name_ctype = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding)) # case C010
event_id_ctype = _visatype.ViInt32(event_id.value) # case S130
timeout_ctype = _converters.convert_timedelta_to_seconds_real64(timeout) # case S140
error_code = self._library.niDCPower_WaitForEventWithChannels(vi_ctype, channel_name_ctype, event_id_ctype, timeout_ctype)
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return
def _error_message(self, error_code):
r'''_error_message
Converts a status code returned by an instrument driver method into a
user-readable string.
Args:
error_code (int): Specifies the **status** parameter that is returned from any of the
NI-DCPower methods.
Returns:
error_message (str): Returns the user-readable message string that corresponds to the status
code you specify.
You must pass a ViChar array with at least 256 bytes.
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
error_code_ctype = _visatype.ViStatus(error_code) # case S150
error_message_ctype = (_visatype.ViChar * 256)() # case C070
error_code = self._library.niDCPower_error_message(vi_ctype, error_code_ctype, error_message_ctype)
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=True)
return error_message_ctype.value.decode(self._encoding)
class Session(_SessionBase):
'''An NI-DCPower session to a National Instruments Programmable Power Supply or Source Measure Unit.'''
    def __init__(self, resource_name, channels=None, reset=False, options={}, independent_channels=True):
        r'''An NI-DCPower session to a National Instruments Programmable Power Supply or Source Measure Unit.

        Creates and returns a new NI-DCPower session to the instrument(s) and channel(s) specified
        in **resource name** to be used in all subsequent NI-DCPower method calls. With this method,
        you can optionally set the initial state of the following session properties:

        - simulate
        - driver_setup

        After calling this method, the specified channel or channels will be in the Uncommitted
        state.

        To place channel(s) in a known start-up state when creating a new session, set **reset** to
        True. This action is equivalent to using the reset method immediately after initializing the
        session.

        To open a session and leave the channel(s) in an existing configuration without passing
        through a transitional output state, set **reset** to False. Next, configure the channel(s)
        as in the previous session, change the desired settings, and then call the initiate method
        to write both settings.

        **Details of Independent Channel Operation**

        With this method and channel-based NI-DCPower methods and properties, you can use any
        channels in the session independently. For example, you can initiate a subset of channels in
        the session with initiate, and the other channels in the session remain in the Uncommitted
        state.

        When you initialize with independent channels, each channel steps through the NI-DCPower
        programming state model independently of all other channels, and you can specify a subset of
        channels for most operations.

        **Note** You can make concurrent calls to a session from multiple threads, but the session
        executes the calls one at a time. If you specify multiple channels for a method or property,
        the session may perform the operation on multiple channels in parallel, though this is not
        guaranteed, and some operations may execute sequentially.

        Args:
            resource_name (str, list, tuple): Specifies the **resource name** as seen in Measurement
                & Automation Explorer (MAX) or lsni, for example "PXI1Slot3" where "PXI1Slot3" is an
                instrument's **resource name**. If independent_channels is False, **resource name**
                can also be a logical IVI name.

                If independent_channels is True, **resource name** can be names of the instrument(s)
                and the channel(s) to initialize. Specify the instrument(s) and channel(s) using the
                form "PXI1Slot3/0,PXI1Slot3/2-3,PXI1Slot4/2-3 or
                PXI1Slot3/0,PXI1Slot3/2:3,PXI1Slot4/2:3", where "PXI1Slot3" and "PXI1Slot4" are
                instrument resource names followed by channels. If you exclude a channels string
                after an instrument resource name, all channels of the instrument(s) are included in
                the session.

            channels (str, list, range, tuple): For new applications, use the default value of None
                and specify the channels in **resource name**.

                Specifies which output channel(s) to include in a new session. Specify multiple
                channels by using a channel list or a channel range. A channel list is a comma (,)
                separated sequence of channel names (for example, 0,2 specifies channels 0 and 2).
                A channel range is a lower bound channel followed by a hyphen (-) or colon (:)
                followed by an upper bound channel (for example, 0-2 specifies channels 0, 1,
                and 2).

                If independent_channels is False, this argument specifies which channels to include
                in a legacy synchronized channels session. If you do not specify any channels, by
                default all channels on the device are included in the session.

                If independent_channels is True, this argument combines with **resource name** to
                specify which channels to include in an independent channels session. Initializing
                an independent channels session with a channels argument is deprecated.

            reset (bool): Specifies whether to reset channel(s) during the initialization procedure.

            options (dict): Specifies the initial value of certain properties for the session. The
                syntax for **options** is a dictionary of properties with an assigned
                value. For example:

                { 'simulate': False }

                You do not have to specify a value for all the properties. If you do not
                specify a value for a property, the default value is used.

                Advanced Example:
                { 'simulate': True, 'driver_setup': { 'Model': '<model number>', 'BoardType': '<type>' } }

                +-------------------------+---------+
                | Property                | Default |
                +=========================+=========+
                | range_check             | True    |
                +-------------------------+---------+
                | query_instrument_status | False   |
                +-------------------------+---------+
                | cache                   | True    |
                +-------------------------+---------+
                | simulate                | False   |
                +-------------------------+---------+
                | record_value_coersions  | False   |
                +-------------------------+---------+
                | driver_setup            | {}      |
                +-------------------------+---------+

            independent_channels (bool): Specifies whether to initialize the session with
                independent channels. Set this argument to False on legacy applications or if you
                are unable to upgrade your NI-DCPower driver runtime to 20.6 or higher.

        Returns:
            session (nidcpower.Session): A session object representing the device.
        '''
        # Base-class init with everything deferred; the real handle is obtained below.
        super(Session, self).__init__(repeated_capability_list=[], vi=None, library=None, encoding=None, freeze_it=False)
        # Normalize the user-facing argument forms into what the driver layer
        # expects. NOTE(review): `options` has a mutable default ({}) but is
        # immediately rebound by the converter and never mutated, so the
        # shared-default pitfall does not bite here.
        resource_name = _converters.convert_repeated_capabilities_without_prefix(resource_name)
        channels = _converters.convert_repeated_capabilities_without_prefix(channels)
        options = _converters.convert_init_with_options_dictionary(options)
        self._library = _library_singleton.get()
        # NOTE(review): driver strings are decoded/encoded as windows-1251 --
        # confirm this matches the installed driver runtime before changing.
        self._encoding = 'windows-1251'
        # Call specified init function
        self._vi = 0  # This must be set before calling _fancy_initialize().
        self._vi = self._fancy_initialize(resource_name, channels, reset, options, independent_channels)
        # Store the parameter list for later printing in __repr__
        param_list = []
        param_list.append("resource_name=" + pp.pformat(resource_name))
        param_list.append("channels=" + pp.pformat(channels))
        param_list.append("reset=" + pp.pformat(reset))
        param_list.append("options=" + pp.pformat(options))
        param_list.append("independent_channels=" + pp.pformat(independent_channels))
        self._param_list = ', '.join(param_list)
        # Freeze attribute creation so typos on session attributes raise
        # instead of silently creating new attributes.
        self._is_frozen = True
    def __enter__(self):
        # Context-manager entry: the session object itself is the managed
        # resource, enabling `with Session(...) as session:`.
        return self
def __exit__(self, exc_type, | |
- 1, -1, -1):
if chain_variance == 0.0:
c = 0.0
else:
c = chain_variance / (fwd_variance[t] + chain_variance)
mean[t] = c * fwd_mean[t] + (1 - c) * mean[t + 1]
return mean, fwd_mean
def compute_expected_log_prob(self):
"""Compute the expected log probability given values of m.
The appendix describes the Expectation of log-probabilities in equation 5 of the DTM paper;
The below implementation is the result of solving the equation and is implemented as in the original
Blei DTM code.
Returns
-------
numpy.ndarray of float
The expected value for the log probabilities for each word and time slice.
"""
for (w, t), val in np.ndenumerate(self.e_log_prob):
self.e_log_prob[w][t] = self.mean[w][t + 1] - np.log(self.zeta[t])
return self.e_log_prob
def sslm_counts_init(self, obs_variance, chain_variance, sstats):
"""Initialize the State Space Language Model with LDA sufficient statistics.
Called for each topic-chain and initializes initial mean, variance and Topic-Word probabilities
for the first time-slice.
Parameters
----------
obs_variance : float, optional
Observed variance used to approximate the true and forward variance.
chain_variance : float
Gaussian parameter defined in the beta distribution to dictate how the beta values evolve over time.
sstats : numpy.ndarray
Sufficient statistics of the LDA model. Corresponds to matrix beta in the linked paper for time slice 0,
expected shape (`self.vocab_len`, `num_topics`).
"""
W = self.vocab_len
T = self.num_time_slices
log_norm_counts = np.copy(sstats)
log_norm_counts = log_norm_counts / sum(log_norm_counts)
log_norm_counts = log_norm_counts + 1.0 / W
log_norm_counts = log_norm_counts / sum(log_norm_counts)
log_norm_counts = np.log(log_norm_counts)
# setting variational observations to transformed counts
self.obs = (np.repeat(log_norm_counts, T, axis=0)).reshape(W, T)
# set variational parameters
self.obs_variance = obs_variance
self.chain_variance = chain_variance
# compute post variance, mean
for w in range(0, W):
self.variance[w], self.fwd_variance[w] = self.compute_post_variance(w, self.chain_variance)
self.mean[w], self.fwd_mean[w] = self.compute_post_mean(w, self.chain_variance)
self.zeta = self.update_zeta()
self.e_log_prob = self.compute_expected_log_prob()
    def fit_sslm(self, sstats):
        """Fits variational distribution.

        This is essentially the m-step.
        Maximizes the approximation of the true posterior for a particular topic using the provided sufficient
        statistics. Updates the values using :meth:`~gensim.models.ldaseqmodel.sslm.update_obs` and
        :meth:`~gensim.models.ldaseqmodel.sslm.compute_expected_log_prob`.

        Parameters
        ----------
        sstats : numpy.ndarray
            Sufficient statistics for a particular topic. Corresponds to matrix beta in the linked paper for the
            current time slice, expected shape (`self.vocab_len`, `num_topics`).

        Returns
        -------
        float
            The lower bound for the true posterior achieved using the fitted approximate distribution.

        """
        W = self.vocab_len
        bound = 0
        old_bound = 0
        sslm_fit_threshold = 1e-6
        sslm_max_iter = 2
        # Start above the threshold so the loop body runs at least once.
        converged = sslm_fit_threshold + 1
        # computing variance, fwd_variance
        self.variance, self.fwd_variance = \
            (np.array(x) for x in list(zip(*[self.compute_post_variance(w, self.chain_variance) for w in range(0, W)])))
        # column sum of sstats
        totals = sstats.sum(axis=0)
        iter_ = 0
        # `model` is hard-coded, so the "DIM" (Document Influence Model)
        # branches below are currently dead code kept for parity with the
        # original Blei DTM implementation.
        model = "DTM"
        if model == "DTM":
            bound = self.compute_bound(sstats, totals)
        if model == "DIM":
            bound = self.compute_bound_fixed(sstats, totals)
        logger.info("initial sslm bound is %f", bound)
        # Coordinate-ascent loop: re-optimize obs, then recompute the bound
        # until the relative change falls under the threshold or the
        # iteration cap is hit.
        while converged > sslm_fit_threshold and iter_ < sslm_max_iter:
            iter_ += 1
            old_bound = bound
            self.obs, self.zeta = self.update_obs(sstats, totals)
            if model == "DTM":
                bound = self.compute_bound(sstats, totals)
            if model == "DIM":
                bound = self.compute_bound_fixed(sstats, totals)
            # Relative improvement of the bound this iteration.
            converged = np.fabs((bound - old_bound) / old_bound)
            logger.info("iteration %i iteration lda seq bound is %f convergence is %f", iter_, bound, converged)
        self.e_log_prob = self.compute_expected_log_prob()
        return bound
def compute_bound(self, sstats, totals):
"""Compute the maximized lower bound achieved for the log probability of the true posterior.
Uses the formula presented in the appendix of the DTM paper (formula no. 5).
Parameters
----------
sstats : numpy.ndarray
Sufficient statistics for a particular topic. Corresponds to matrix beta in the linked paper for the first
time slice, expected shape (`self.vocab_len`, `num_topics`).
totals : list of int of length `len(self.time_slice)`
The totals for each time slice.
Returns
-------
float
The maximized lower bound.
"""
w = self.vocab_len
t = self.num_time_slices
term_1 = 0
term_2 = 0
term_3 = 0
val = 0
ent = 0
chain_variance = self.chain_variance
# computing mean, fwd_mean
self.mean, self.fwd_mean = \
(np.array(x) for x in zip(*[self.compute_post_mean(w, self.chain_variance) for w in range(0, w)]))
self.zeta = self.update_zeta()
for w in range(0, w):
val += (self.variance[w][0] - self.variance[w][t]) / 2 * chain_variance
logger.info("Computing bound, all times")
for t in range(1, t + 1):
term_1 = 0.0
term_2 = 0.0
ent = 0.0
for w in range(0, w):
m = self.mean[w][t]
prev_m = self.mean[w][t - 1]
v = self.variance[w][t]
# w_phi_l is only used in Document Influence Model; the values are always zero in this case
# w_phi_l = sslm.w_phi_l[w][t - 1]
# exp_i = np.exp(-prev_m)
# term_1 += (np.power(m - prev_m - (w_phi_l * exp_i), 2) / (2 * chain_variance)) -
# (v / chain_variance) - np.log(chain_variance)
term_1 += \
(np.power(m - prev_m, 2) / (2 * chain_variance)) - (v / chain_variance) - np.log(chain_variance)
term_2 += sstats[w][t - 1] * m
ent += np.log(v) / 2 # note the 2pi's cancel with term1 (see doc)
term_3 = -totals[t - 1] * np.log(self.zeta[t - 1])
val += term_2 + term_3 + ent - term_1
return val
    def update_obs(self, sstats, totals):
        """Optimize the bound with respect to the observed variables.

        TODO:
        This is by far the slowest function in the whole algorithm.
        Replacing or improving the performance of this would greatly speed things up.

        Parameters
        ----------
        sstats : numpy.ndarray
            Sufficient statistics for a particular topic. Corresponds to matrix beta in the linked paper for the first
            time slice, expected shape (`self.vocab_len`, `num_topics`).
        totals : list of int of length `len(self.time_slice)`
            The totals for each time slice.

        Returns
        -------
        (numpy.ndarray of float, numpy.ndarray of float)
            The updated optimized values for obs and the zeta variational parameter.

        """
        # Words whose count L2-norm is below this cutoff share one cached obs.
        OBS_NORM_CUTOFF = 2
        STEP_SIZE = 0.01  # epsilon for fmin_cg's gradient approximation
        TOL = 1e-3        # gradient-norm stopping tolerance for fmin_cg
        W = self.vocab_len
        T = self.num_time_slices
        runs = 0  # counts optimizer invocations; currently unused afterwards
        # One row of mean derivatives per time slice, each of length T + 1.
        mean_deriv_mtx = np.resize(np.zeros(T * (T + 1)), (T, T + 1))
        norm_cutoff_obs = None
        for w in range(0, W):
            w_counts = sstats[w]
            counts_norm = 0
            # now we find L2 norm of w_counts
            for i in range(0, len(w_counts)):
                counts_norm += w_counts[i] * w_counts[i]
            counts_norm = np.sqrt(counts_norm)
            if counts_norm < OBS_NORM_CUTOFF and norm_cutoff_obs is not None:
                # NOTE(review): this branch reads back self.obs[w] rather than
                # reusing the cached `norm_cutoff_obs` optimum, which makes the
                # cache appear unused -- confirm against the upstream DTM code
                # before changing.
                obs = self.obs[w]
                norm_cutoff_obs = np.copy(obs)
            else:
                if counts_norm < OBS_NORM_CUTOFF:
                    # Below the cutoff: optimize against zeroed counts.
                    w_counts = np.zeros(len(w_counts))
                # TODO: apply lambda function
                for t in range(0, T):
                    mean_deriv = mean_deriv_mtx[t]
                    mean_deriv = self.compute_mean_deriv(w, t, mean_deriv)
                    mean_deriv_mtx[t] = mean_deriv
                deriv = np.zeros(T)
                # Extra args threaded through to f_obs / df_obs by fmin_cg.
                args = self, w_counts, totals, mean_deriv_mtx, w, deriv
                obs = self.obs[w]
                model = "DTM"
                if model == "DTM":
                    # slowest part of method
                    obs = optimize.fmin_cg(
                        f=f_obs, fprime=df_obs, x0=obs, gtol=TOL, args=args, epsilon=STEP_SIZE, disp=0
                    )
                if model == "DIM":
                    pass
                runs += 1
                if counts_norm < OBS_NORM_CUTOFF:
                    # Cache the optimum for subsequent low-norm words.
                    norm_cutoff_obs = obs
                self.obs[w] = obs
        self.zeta = self.update_zeta()
        return self.obs, self.zeta
def compute_mean_deriv(self, word, time, deriv):
"""Helper functions for optimizing a function.
Compute the derivative of:
.. :math::
E[\beta_{t,w}]/d obs_{s,w} for t = 1:T.
Parameters
----------
word : int
The word's ID.
time : int
The time slice.
deriv : list of float
Derivative for each time slice.
Returns
-------
list of float
Mean derivative for each time slice.
"""
T = self.num_time_slices
fwd_variance = self.variance[word]
deriv[0] = 0
# forward pass
for t in range(1, T + 1):
if self.obs_variance > 0.0:
w = self.obs_variance / (fwd_variance[t - 1] + self.chain_variance + self.obs_variance)
else:
w = 0.0
val = w * deriv[t - 1]
if time == t - 1:
val += (1 - w)
deriv[t] = val
for t in range(T - 1, -1, -1):
if self.chain_variance == 0.0:
w = 0.0
else:
w = self.chain_variance / (fwd_variance[t] + self.chain_variance)
deriv[t] = w * deriv[t] + (1 - w) * deriv[t + 1]
return deriv
def compute_obs_deriv(self, word, word_counts, totals, mean_deriv_mtx, deriv):
"""Derivation of obs which is used in derivative function `df_obs` while optimizing.
Parameters
----------
word : int
The word's ID.
word_counts : list of int
Total word counts for each time slice.
totals : list of int of length `len(self.time_slice)`
The totals for each time slice.
mean_deriv_mtx : list of float
Mean derivative for each time slice.
deriv : list of float
Mean derivative for | |
beta.connector.cs.getsockname()
assert ixBeta.ca == beta.connector.ca
assert ixBeta.ha, beta.connector.ha
request = dict([('method', u'GET'),
('path', u'/echo?name=fame'),
('qargs', dict()),
('fragment', u''),
('headers', dict([('Accept', 'application/json')])),
('body', None),
])
beta.requests.append(request)
while (beta.requests or beta.connector.txbs) and not ixBeta.rxbs :
beta.service()
time.sleep(0.05)
alpha.serviceReceivesAllIx()
time.sleep(0.05)
msgIn = bytes(ixBeta.rxbs)
assert msgIn == (b'GET /echo?name=fame HTTP/1.1\r\n'
b'Host: 127.0.0.1:6101\r\n'
b'Accept-Encoding: identity\r\n'
b'Accept: application/json\r\n\r\n')
ixBeta.clearRxbs() # ensure no stale stuff in beta rx buffer at Alpha
# build response
msgOut = (b'HTTP/1.1 200 OK\r\n'
b'Content-Length: 122\r\n'
b'Content-Type: application/json\r\n'
b'Date: Thu, 30 Apr 2015 19:37:17 GMT\r\n'
b'Server: IoBook.local\r\n\r\n'
b'{"content": null, '
b'"query": {"name": "fame"}, '
b'"verb": "GET", '
b'"url": "http://127.0.0.1:8080/echo?name=fame", '
b'"action": null}')
ixBeta.tx(msgOut)
while ixBeta.txbs or not beta.respondent.ended:
alpha.serviceSendsAllIx()
time.sleep(0.05)
beta.service()
time.sleep(0.05)
assert not beta.connector.rxbs
assert not beta.waited
assert beta.respondent.ended
assert len(beta.responses) == 1
response = beta.responses.popleft()
assert response == {'version': (1, 1),
'status': 200,
'reason': 'OK',
'headers': {'Content-Length': '122',
'Content-Type': 'application/json',
'Date': 'Thu, 30 Apr 2015 19:37:17 GMT',
'Server': 'IoBook.local'},
'body': bytearray(b'{"content": null, "query": {"name": "fame"}, "verb": "GE'
b'T", "url": "http://127.0.0.1:8080/echo?name=fame", "acti'
b'on": null}'),
'data': {'content': None,
'query': {'name': 'fame'},
'verb': 'GET',
'url': 'http://127.0.0.1:8080/echo?name=fame',
'action': None},
'request': {'method': 'GET',
'path': '/echo',
'qargs': {'name': 'fame'},
'fragment': '',
'headers': {'Accept': 'application/json'},
'body': b'',
'host': '127.0.0.1',
'port': 6101,
'scheme': 'http',
'data': None,
'fargs': None},
'errored': False,
'error': None}
# resend request in pipeline mode
beta.requests.append(request)
while ( beta.requests or beta.connector.txbs) and not ixBeta.rxbs :
beta.service()
time.sleep(0.05)
alpha.serviceReceivesAllIx()
time.sleep(0.05)
msgIn = bytes(ixBeta.rxbs)
assert msgIn == (b'GET /echo?name=fame HTTP/1.1\r\n'
b'Host: 127.0.0.1:6101\r\n'
b'Accept-Encoding: identity\r\n'
b'Accept: application/json\r\n\r\n')
ixBeta.clearRxbs()
# build response
msgOut =( b'HTTP/1.1 200 OK\r\n'
b'Content-Length: 122\r\n'
b'Content-Type: application/json\r\n'
b'Date: Thu, 30 Apr 2015 19:37:17 GMT\r\n'
b'Server: IoBook.local\r\n\r\n'
b'{"content": null, '
b'"query": {"name": "fame"}, '
b'"verb": "GET", '
b'"url": "http://127.0.0.1:8080/echo?name=fame", '
b'"action": null}')
ixBeta.tx(msgOut)
while ixBeta.txbs or not beta.respondent.ended:
alpha.serviceSendsAllIx()
time.sleep(0.05)
beta.service()
time.sleep(0.05)
assert not beta.connector.rxbs
assert not beta.waited
assert beta.respondent.ended
assert len(beta.responses) == 1
response = beta.responses.popleft()
assert response == {'version': (1, 1),
'status': 200,
'reason': 'OK',
'headers':
{'Content-Length': '122',
'Content-Type': 'application/json',
'Date': 'Thu, 30 Apr 2015 19:37:17 GMT',
'Server': 'IoBook.local'},
'body': bytearray(b'{"content": null, "query": {"name": "fame"}, "verb": "GE'
b'T", "url": "http://127.0.0.1:8080/echo?name=fame", "acti'
b'on": null}'),
'data': {'action': None,
'content': None,
'query': {'name': 'fame'},
'url': 'http://127.0.0.1:8080/echo?name=fame',
'verb': 'GET'},
'error': None,
'errored': False,
'request': {'host': '127.0.0.1',
'port': 6101,
'scheme': 'http',
'method': 'GET',
'path': '/echo',
'qargs': {'name': 'fame'},
'fragment': '',
'headers': {'Accept': 'application/json'},
'body': b'',
'data': None,
'fargs': None,
}
}
alpha.close()
beta.close()
def test_client_pipeline_echo_simple():
    """
    Test CLient pipeline servicing

    Drives a non-blocking Client (beta) against a mock echo Server (alpha):
    queues a request, services both ends until a full response is parsed,
    checks every field of the parsed response, then pipelines the same
    request a second time over the still-open connection.
    """
    # Server binds all interfaces; eha is the loopback endpoint clients use.
    alpha = tcp.Server(port = 6101, bufsize=131072)
    assert alpha.reopen()
    assert alpha.ha == ('0.0.0.0', 6101)
    assert alpha.eha == ('127.0.0.1', 6101)
    host = alpha.eha[0]
    port = alpha.eha[1]
    method = u'GET'
    path = u'/echo?name=fame'
    headers = dict([('Accept', 'application/json')])
    beta = clienting.Client(bufsize=131072,
                            hostname=host,
                            port=port,
                            method=method,
                            path=path,
                            headers=headers,
                            )
    assert beta.reopen()
    assert not beta.connector.accepted
    assert not beta.connector.connected
    assert not beta.connector.cutoff
    # connect Client Beta to Server Alpha; poll both ends until the TCP
    # handshake completes and alpha has registered the incoming connection.
    while True:
        beta.connector.serviceConnect()
        alpha.serviceConnects()
        if beta.connector.connected and beta.connector.ca in alpha.ixes:
            break
        time.sleep(0.05)
    assert beta.connector.accepted
    assert beta.connector.connected
    assert not beta.connector.cutoff
    assert beta.connector.ca == beta.connector.cs.getsockname()
    assert beta.connector.ha == beta.connector.cs.getpeername()
    assert alpha.eha == beta.connector.ha
    # ixBeta is alpha's per-connection handler for beta's socket.
    ixBeta = alpha.ixes[beta.connector.ca]
    assert ixBeta.ca is not None
    assert ixBeta.cs is not None
    assert ixBeta.cs.getsockname() == beta.connector.cs.getpeername()
    assert ixBeta.cs.getpeername() == beta.connector.cs.getsockname()
    assert ixBeta.ca == beta.connector.ca
    assert ixBeta.ha, beta.connector.ha
    # Queue a request dict for the client's request pipeline.
    request = dict([('method', u'GET'),
                    ('path', u'/echo?name=fame'),
                    ('qargs', dict()),
                    ('fragment', u''),
                    ('headers', dict([('Accept', 'application/json')])),
                    ('body', None),
                    ])
    beta.requests.append(request)
    # Service both ends until the mock echo server answers and the client's
    # respondent has parsed a complete response.
    while (not alpha.ixes or beta.requests or
            beta.connector.txbs or not beta.respondent.ended):
        mockEchoService(alpha)
        time.sleep(0.05)
        beta.service()
        time.sleep(0.05)
    assert not beta.connector.rxbs
    assert not beta.waited
    assert beta.respondent.ended
    assert len(beta.responses) == 1
    response = beta.respond()  # returns next response from .responses
    assert response == clienting.Response(version=(1, 1),
                                          status=200,
                                          reason='OK',
                                          headers=Hict([('Content-Length', '122'),
                                                        ('Content-Type', 'application/json'),
                                                        ('Date', 'Thu, 30 Apr 2015 19:37:17 GMT'),
                                                        ('Server', 'IoBook.local')]),
                                          body=bytearray(b'{"content": null, '
                                                         b'"query": {"name": "fame"}, '
                                                         b'"verb": "GET", '
                                                         b'"url": "http://127.0.0.1:8080/echo?name=fame", '
                                                         b'"action": null}'),
                                          data={'content': None,
                                                'query': {'name': 'fame'},
                                                'verb': 'GET',
                                                'url': 'http://127.0.0.1:8080/echo?name=fame',
                                                'action': None},
                                          request={'method': 'GET',
                                                   'path': '/echo',
                                                   'qargs': {'name': 'fame'},
                                                   'fragment': '',
                                                   'headers': Hict([('Accept', 'application/json')]),
                                                   'body': b'',
                                                   'host': '127.0.0.1',
                                                   'port': 6101,
                                                   'scheme': 'http',
                                                   'data': None,
                                                   'fargs': None},
                                          errored=False,
                                          error=None)
    # pipeline request to send again: the same connection is reused without
    # reconnecting, exercising the pipelined request path.
    beta.requests.append(request)
    while (not alpha.ixes or beta.requests or
            beta.connector.txbs or not beta.respondent.ended):
        mockEchoService(alpha)
        time.sleep(0.05)
        beta.service()
        time.sleep(0.05)
    assert not beta.connector.rxbs
    assert not beta.waited
    assert beta.respondent.ended
    assert len(beta.responses) == 1
    response = beta.responses.popleft()
    # Second response must match the first, field for field.
    assert response == {'version': (1, 1),
                        'status': 200,
                        'reason': 'OK',
                        'headers': Hict([('Content-Length', '122'), ('Content-Type', 'application/json'), ('Date', 'Thu, 30 Apr 2015 19:37:17 GMT'), ('Server', 'IoBook.local')]),
                        'body': bytearray(b'{"content": null, "query": {"name": "fame"}, "verb": "GE'
                                          b'T", "url": "http://127.0.0.1:8080/echo?name=fame", "acti'
                                          b'on": null}'),
                        'data': {'content': None,
                                 'query': {'name': 'fame'},
                                 'verb': 'GET',
                                 'url': 'http://127.0.0.1:8080/echo?name=fame',
                                 'action': None},
                        'request': {'method': 'GET',
                                    'path': '/echo',
                                    'qargs': {'name': 'fame'},
                                    'fragment': '',
                                    'headers': Hict([('Accept', 'application/json')]),
                                    'body': b'',
                                    'host': '127.0.0.1',
                                    'port': 6101,
                                    'scheme': 'http',
                                    'data': None,
                                    'fargs': None},
                        'errored': False,
                        'error': None}
    # Release both sockets.
    alpha.close()
    beta.close()
def test_client_echo_simple_host_port_path():
    """
    Exercise the simple first-time request pattern: the Client is fully
    configured with hostname/port/method/path up front, transmits once,
    then issues a follow-up request via .request().
    """
    server = tcp.Server(port=6101, bufsize=131072)
    assert server.reopen()
    assert server.ha == ('0.0.0.0', 6101)
    assert server.eha == ('127.0.0.1', 6101)

    host, port = server.eha
    client = clienting.Client(bufsize=131072,
                              hostname=host,
                              port=port,
                              method=u'GET',
                              path=u'/echo?name=fame',
                              headers={'Accept': 'application/json'},
                              )
    assert client.reopen()
    assert not client.connector.accepted
    assert not client.connector.connected
    assert not client.connector.cutoff

    client.transmit()
    # Service both ends until the full response has been received.
    while (not server.ixes or client.requests or
           client.connector.txbs or not client.respondent.ended):
        mockEchoService(server)
        time.sleep(0.05)
        client.service()
        time.sleep(0.05)

    assert not client.connector.rxbs
    assert not client.waited
    assert client.respondent.ended
    assert len(client.responses) == 1
    reply = client.responses.popleft()
    assert reply == {'version': (1, 1),
                     'status': 200,
                     'reason': 'OK',
                     'headers': Hict([('Content-Length', '122'), ('Content-Type', 'application/json'), ('Date', 'Thu, 30 Apr 2015 19:37:17 GMT'), ('Server', 'IoBook.local')]),
                     'body': bytearray(b'{"content": null, "query": {"name": "fame"}, "verb": "GE'
                                       b'T", "url": "http://127.0.0.1:8080/echo?name=fame", "acti'
                                       b'on": null}'),
                     'data': {'content': None,
                              'query': {'name': 'fame'},
                              'verb': 'GET',
                              'url': 'http://127.0.0.1:8080/echo?name=fame',
                              'action': None},
                     'request': {'host': '127.0.0.1',
                                 'port': 6101,
                                 'scheme': 'http',
                                 'method': 'GET',
                                 'path': '/echo',
                                 'fragment': '',
                                 'qargs': {'name': 'fame'},
                                 'headers': Hict([('Accept', 'application/json')]),
                                 'body': b'',
                                 'data': None,
                                 'fargs': None},
                     'errored': False,
                     'error': None}

    # Second request on the same client using .request().
    client.request(method=u'GET',
                   path=u'/echo?name=fame',
                   headers={'Accept': 'application/json'})
    while (not server.ixes or client.requests or
           client.connector.txbs or not client.respondent.ended):
        mockEchoService(server)
        time.sleep(0.05)
        client.service()
        time.sleep(0.05)

    assert not client.connector.rxbs
    assert not client.waited
    assert client.respondent.ended
    assert len(client.responses) == 1
    reply = client.responses.popleft()
    assert reply == {'version': (1, 1),
                     'status': 200,
                     'reason': 'OK',
                     'headers': Hict([('Content-Length', '122'), ('Content-Type', 'application/json'), ('Date', 'Thu, 30 Apr 2015 19:37:17 GMT'), ('Server', 'IoBook.local')]),
                     'body': bytearray(b'{"content": null, "query": {"name": "fame"}, "verb": "GE'
                                       b'T", "url": "http://127.0.0.1:8080/echo?name=fame", "acti'
                                       b'on": null}'),
                     'data': {'content': None,
                              'query': {'name': 'fame'},
                              'verb': 'GET',
                              'url': 'http://127.0.0.1:8080/echo?name=fame',
                              'action': None},
                     'request': {'method': 'GET',
                                 'path': '/echo',
                                 'qargs': {'name': 'fame'},
                                 'fragment': '',
                                 'headers': Hict([('Accept', 'application/json')]),
                                 'body': b'',
                                 'data': None,
                                 'fargs': None,
                                 'host': '127.0.0.1',
                                 'port': 6101,
                                 'scheme': 'http'},
                     'errored': False,
                     'error': None}
    server.close()
    client.close()
def test_client_pipline_echo_simple_path_scheme():
"""
Test Client pipeline servicing using path components for host port scheme
"""
alpha = tcp.Server(port = 6101, bufsize=131072)
assert alpha.reopen()
assert alpha.ha == ('0.0.0.0', 6101)
assert alpha.eha == ('127.0.0.1', 6101)
path = "http://{0}:{1}/".format('localhost', alpha.eha[1]) # needed for connect
# method and headers set in request
beta = clienting.Client(bufsize=131072,
path=path,
reconnectable=True, # do not close connection
)
assert beta.reopen()
assert not beta.connector.accepted
assert not beta.connector.connected
assert not beta.connector.cutoff
request = dict([('method', u'GET'),
('path', u'/echo?name=fame'),
('qargs', dict()),
('fragment', u''),
('headers', dict([('Accept', 'application/json')])),
('body', None),
])
beta.requests.append(request)
while (not alpha.ixes or beta.requests or
beta.connector.txbs or not beta.respondent.ended):
mockEchoService(alpha) # mockEchoServiceLocalhost
time.sleep(0.05)
beta.service()
time.sleep(0.05)
assert not beta.connector.rxbs
assert not beta.waited
assert beta.respondent.ended
assert len(beta.responses) == 1
response = beta.responses.popleft()
assert response == {'version': (1, 1),
'status': 200,
'reason': 'OK',
'headers': Hict([('Content-Length', '122'), ('Content-Type', 'application/json'), ('Date', 'Thu, 30 Apr 2015 19:37:17 GMT'), ('Server', 'IoBook.local')]),
'body': bytearray(b'{"content": null, "query": {"name": "fame"}, "verb": "GE'
b'T", "url": "http://127.0.0.1:8080/echo?name=fame", "acti'
b'on": null}'),
'data': {'content': None,
'query': {'name': 'fame'},
'verb': 'GET',
'url': 'http://127.0.0.1:8080/echo?name=fame',
'action': None},
'request': {'method': 'GET',
'path': '/echo',
'qargs': {'name': 'fame'},
'fragment': '',
'headers': Hict([('Accept', 'application/json')]),
'body': b'',
'host': 'localhost',
'port': 6101,
'scheme': 'http',
'data': None,
'fargs': None},
'errored': False,
'error': None}
beta.requests.append(request) # pipeline request
while (not alpha.ixes or beta.requests or
beta.connector.txbs or not beta.respondent.ended):
mockEchoService(alpha)
time.sleep(0.05)
beta.service()
time.sleep(0.05)
assert not beta.connector.rxbs
assert not beta.waited
assert beta.respondent.ended
assert len(beta.responses) == 1
response = beta.responses.popleft()
assert response == {'version': (1, 1),
'status': 200,
'reason': 'OK',
'headers': Hict([('Content-Length', | |
is not None:
antennas = np.einsum('ijmno,ki,lj', antennas, pmat, pmat)
# Prefactors
e_f = (f_use / self.f_pivot)**self.specin_omega / self.norm_pivot
prefac = 0.5 * (2 * e_f / 5)**2
if deltaOmega_norm:
prefac *= (4*np.pi)**2
# Loop over detectors
inoivar = np.zeros([len(t_use), npix])
for iA in range(self.ndet):
for iB in range(self.ndet):
iS_AB = iS_f[:, iA, iB]
for iC in range(self.ndet):
gBC = antennas[iB, iC, :, :, :]
if iB == iC and no_autos[iB, iC]:
continue
for iD in range(self.ndet):
iS_CD = iS_f[:, iC, iD]
gDA = antennas[iD, iA, :, :, :]
if iA == iD and no_autos[iA, iD]:
continue
ff = df*prefac*iS_AB*iS_CD
inoivar += np.sum(ff[None, :, None] *
np.real(gBC * gDA), axis=1)
return np.squeeze(inoivar)
def get_pi_curve(self, t, f, nside, is_fspacing_log=False,
no_autos=False, beta_range=[-10, 10],
nsigma=1, proj=None):
""" Computes the power-law-integrated (PI) sensitivity curve
for this network (see arXiv:1310.5300).
Args:
t (float or array_like): `N_t` time values (in s). If a single
number is passed, then the "rigid network" approximation
is used, and this time is interpreted as the total
observing time. Otherwise, an integral over time is
performed.
f: array of `N_f` frequency values (in Hz). This will be the
frequencies at which the PI curve will be sampled, and also
the frequencies used for numerical integration.
nside: HEALPix resolution parameter. Used to create
maps of the antenna pattern and computes its sky
average.
no_autos (bool, or array_like): if a single `True`
value, all detector auto-correlations will be
removed. If a 1D array, only the auto-correlations
for which the array element is `True` will be
removed. If a 2D array, all autos and cross-
correlations for which the array element is `True`
will be removed.
beta_range: a list containing the range of power law indices
for which the PI curve will be computed.
nsigma: S/N of the PI curve (default: 1-sigma).
proj (dictionary or `None`): if you want to project the data
onto a set of linear combinations of the detectors, pass
the linear coefficients of those here. `proj` should be
a dictionary with two items: 'vectors' containing a 2D
array (or a single vector) with the linear coefficients
as rows and 'deproj'. If 'deproj' is `True`, then those
linear combinations will actually be projeted out. If
`proj` is `None`, then no projection or de-projection
will happen.
Returns:
array_like: array of size `N_f`.
"""
t_use = np.atleast_1d(t)
f_use = f
if is_fspacing_log:
dlf = np.mean(np.diff(np.log(f)))
df = f * dlf
else:
df = np.mean(np.diff(f)) * np.ones(len(f))
inv_dsig2_dnu_dt = self.get_dsigm2_dnu_t(t_use, f_use, nside,
no_autos=no_autos,
proj=proj)
# Sum over time
if len(t_use) == 1:
inv_dsig2_dnu = np.squeeze(inv_dsig2_dnu_dt * t)
else:
dt = np.mean(np.diff(t_use))
inv_dsig2_dnu = np.sum(inv_dsig2_dnu_dt, axis=0) * dt
def _om(beta):
# Sum over frequencies
plaw = (f_use/self.f_pivot)**beta
snm2 = np.sum(inv_dsig2_dnu * plaw**2 * df)
return nsigma * plaw / np.sqrt(snm2)
betas = np.linspace(beta_range[0], beta_range[1], 100)
oms = np.array([_om(b) for b in betas])
pi = np.max(oms, axis=0)
return pi
    def get_dsigm2_dnu_t(self, t, f, nside, no_autos=False,
                         proj=None):
        """ Computes :math:`d\\sigma^{-2}/df\\,dt` for a set
        of frequencies and times.

        Args:
            t: array of `N_t` time values (in s).
            f: array of `N_f` frequency values (in Hz).
            nside: HEALPix resolution parameter. Used to create
                maps of the antenna pattern and compute its sky
                average.
            no_autos (bool, or array_like): if a single `True`
                value, all detector auto-correlations will be
                removed. If a 1D array, only the auto-correlations
                for which the array element is `True` will be
                removed. If a 2D array, all autos and cross-
                correlations for which the array element is `True`
                will be removed.
            proj (dictionary or `None`): if you want to project the data
                onto a set of linear combinations of the detectors, pass
                the linear coefficients of those here. `proj` should be
                a dictionary with two items: 'vectors' containing a 2D
                array (or a single vector) with the linear coefficients
                as rows and 'deproj'. If 'deproj' is `True`, then those
                linear combinations will actually be projected out. If
                `proj` is `None`, then no projection or de-projection
                will happen.

        Raises:
            ValueError: if a 1D `no_autos` does not have one entry
                per detector.

        Returns:
            array_like: array of shape `[N_t, N_f]` (squeezed, so
            singleton dimensions are dropped).
        """
        # Normalize `no_autos` into an [ndet, ndet] matrix of pair flags.
        if np.ndim(no_autos) == 0:
            no_autos = np.array([no_autos] * self.ndet)
        else:
            if len(no_autos) != self.ndet:
                raise ValueError("No autos should have %d elements" %
                                 self.ndet)
        if np.ndim(no_autos) == 1:
            # A 1D array only flags the diagonal (auto-correlations).
            no_autos = np.diag(no_autos)
        no_autos = np.array(no_autos)
        pmat = self._compute_projector(proj)
        t_use = np.atleast_1d(t)
        f_use = f
        npix = hp.nside2npix(nside)
        pix_area = 4*np.pi/npix
        th, ph = hp.pix2ang(nside, np.arange(npix))
        ct, st, cp, sp = self._precompute_skyvec(th, ph)
        # Get S matrix (per-frequency inverse of the detector noise
        # matrix — see _get_iS_f):
        iS_f = self._get_iS_f(f_use, pmat)
        # Get all maps: sky-integrated antenna overlaps for each
        # detector pair, time and frequency.
        gammas = np.zeros([self.ndet, self.ndet,
                           len(t_use), len(f_use)],
                          dtype=np.cdouble)
        for i1 in range(self.ndet):
            for i2 in range(i1, self.ndet):
                a12 = self._get_antenna_ij(i1, i2, t_use, f_use,
                                           ct, st, cp, sp,
                                           inc_baseline=True)
                # Sky integral
                ia12 = np.sum(a12, axis=-1) * pix_area
                gammas[i1, i2, :, :] = ia12
                if i2 != i1:
                    # Hermitian symmetry: only the upper triangle is computed.
                    gammas[i2, i1, :, :] = np.conj(ia12)
        if pmat is not None:
            # Rotate the two detector indices into the projected basis.
            gammas = np.einsum('ijmn,ki,lj', gammas, pmat, pmat)
        # Translation between Omega and I
        e_f = (self.f_pivot / f_use)**3 / self.norm_pivot
        # Prefactors
        prefac = 0.5 * (2 * e_f / 5)**2
        # Loop over detectors: accumulate iS_AB * iS_CD * Re(g_BC * g_DA)
        # over all detector quadruples, skipping flagged auto pairs.
        inoivar = np.zeros([len(t_use), len(f_use)])
        for iA in range(self.ndet):
            for iB in range(self.ndet):
                iS_AB = iS_f[:, iA, iB]
                for iC in range(self.ndet):
                    rBC = gammas[iB, iC, :, :]
                    if iB == iC and no_autos[iB, iC]:
                        continue
                    for iD in range(self.ndet):
                        iS_CD = iS_f[:, iC, iD]
                        rDA = gammas[iD, iA, :, :]
                        if iA == iD and no_autos[iA, iD]:
                            continue
                        ff = prefac*iS_AB*iS_CD
                        inoivar += ff[None, :] * np.real(rBC * rDA)
        return np.squeeze(inoivar)
def get_N_ell(self, t, f, nside, is_fspacing_log=False,
no_autos=False, deltaOmega_norm=True, proj=None):
""" Computes :math:`N_\\ell` for this network.
Args:
t (float or array_like): `N_t` time values (in s). If a single
number is passed, then the "rigid network" approximation
is used, and this time is interpreted as the total
observing time. Otherwise, an integral over time is
performed.
f: array of `N_f` frequency values (in Hz).
nside: HEALPix resolution parameter used to compute spherical
harmonic transforms.
is_fspacing_log: if `True`, `f` is log-spaced
(linearly-spaced otherwise).
(Default: `False`).
no_autos (bool, or array_like): if a single `True`
value, all detector auto-correlations will be
removed. If a 1D array, only the auto-correlations
for which the array element is `True` will be
removed. If a 2D array, all autos and cross-
correlations for which the array element is `True`
will be removed.
deltaOmega_norm: if `True`, the quantity being mapped is
:math:`\\delta\\Omega = (\\Omega/\\bar{\\Omega}-1)/4\\pi`.
Otherwise the :math:`4\\pi` factor is omitted.
(Default: `True`).
proj (dictionary or `None`): if you want to project the data
onto a set of linear combinations of the detectors, pass
the linear coefficients of those here. `proj` should be
a dictionary with two items: 'vectors' containing a 2D
array (or a single vector) with the linear coefficients
as rows and 'deproj'. If 'deproj' is `True`, then those
linear combinations will actually be projeted out. If
`proj` is `None`, then no projection or de-projection
will happen.
Returns:
array_like: array of size `N_l = 3 * nside` containing the noise
power spectrum.
"""
t_use = np.atleast_1d(t)
f_use = np.atleast_1d(f)
if is_fspacing_log:
dlf = np.mean(np.diff(np.log(f)))
df = f * dlf
else:
df = np.mean(np.diff(f)) * np.ones(len(f))
gls = self.get_G_ell(t_use, f_use, nside, no_autos=no_autos,
deltaOmega_norm=deltaOmega_norm, proj=proj)
# Sum over frequencies
gls = np.sum(gls * df[:, None, None], axis=0)
# Sum over times
if len(t_use) == 1:
gls = np.squeeze(gls * t)
else:
dt = np.mean(np.diff(t_use))
gls = np.sum(gls, axis=0) * dt
return 1/gls
def get_G_ell(self, t, f, nside, no_autos=False, deltaOmega_norm=True,
proj=None):
""" Computes :math:`G_\\ell` in Eq. 37 of the companion paper.
Args:
t: array of `N_t` time values (in s).
f: array of `N_f` frequency values (in Hz).
nside: HEALPix resolution parameter used to compute spherical
harmonic transforms.
no_autos (bool, or array_like): if a single `True`
value, all detector auto-correlations will be
removed. If a 1D array, only the auto-correlations
for which the array element is `True` will be
| |
assert res.json['errors'][0]['detail'] == 'Deactivated users cannot be added as contributors.'
def test_add_contributor_user_is_deactivated_unregistered_payload(
self, app, user, url_published):
user_contrib = UserFactory()
user_contrib.date_disabled = datetime.utcnow()
user_contrib.save()
payload = {
'data': {
'type': 'contributors',
'attributes': {
'full_name': user_contrib.fullname,
'email': user_contrib.username
},
}
}
res = app.post_json_api(
url_published, payload,
auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Deactivated users cannot be added as contributors.'
def test_add_contributor_index_returned(
self, app, user, data_user_two,
data_user_three, url_published):
res = app.post_json_api(url_published, data_user_two, auth=user.auth)
assert res.status_code == 201
assert res.json['data']['attributes']['index'] == 1
res = app.post_json_api(url_published, data_user_three, auth=user.auth)
assert res.status_code == 201
assert res.json['data']['attributes']['index'] == 2
def test_add_contributor_set_index_out_of_range(
self, app, user, user_two, preprint_published, url_published):
user_contrib_one = UserFactory()
preprint_published.add_contributor(user_contrib_one, save=True)
user_contrib_two = UserFactory()
preprint_published.add_contributor(user_contrib_two, save=True)
payload = {
'data': {
'type': 'contributors',
'attributes': {
'index': 4
},
'relationships': {
'users': {
'data': {
'type': 'users',
'id': user_two._id
}
}
}
}
}
res = app.post_json_api(
url_published, payload,
auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == '4 is not a valid contributor index for node with id {}'.format(
preprint_published._id)
def test_add_contributor_set_index_first(
self, app, user, user_two, preprint_published, url_published):
user_contrib_one = UserFactory()
preprint_published.add_contributor(user_contrib_one, save=True)
user_contrib_two = UserFactory()
preprint_published.add_contributor(user_contrib_two, save=True)
payload = {
'data': {
'type': 'contributors',
'attributes': {
'index': 0
},
'relationships': {
'users': {
'data': {
'type': 'users',
'id': user_two._id
}
}
}
}
}
res = app.post_json_api(url_published, payload, auth=user.auth)
preprint_published.reload()
assert res.status_code == 201
contributor_obj = preprint_published.preprintcontributor_set.get(user=user_two)
index = list(
preprint_published.get_preprintcontributor_order()
).index(contributor_obj.pk)
assert index == 0
def test_add_contributor_set_index_last(
self, app, user, user_two, preprint_published, url_published):
user_contrib_one = UserFactory()
preprint_published.add_contributor(user_contrib_one, save=True)
user_contrib_two = UserFactory()
preprint_published.add_contributor(user_contrib_two, save=True)
payload = {
'data': {
'type': 'contributors',
'attributes': {
'index': 3
},
'relationships': {
'users': {
'data': {
'type': 'users',
'id': user_two._id
}
}
}
}
}
res = app.post_json_api(url_published, payload, auth=user.auth)
preprint_published.reload()
assert res.status_code == 201
contributor_obj = preprint_published.preprintcontributor_set.get(user=user_two)
index = list(
preprint_published.get_preprintcontributor_order()
).index(contributor_obj.pk)
assert index == 3
def test_add_inactive_merged_user_as_contributor(
self, app, user, url_published):
primary_user = UserFactory()
merged_user = UserFactory(merged_by=primary_user)
payload = {
'data': {
'type': 'contributors',
'attributes': {},
'relationships': {
'users': {
'data': {
'type': 'users',
'id': merged_user._id
}
}
}
}
}
res = app.post_json_api(url_published, payload, auth=user.auth)
assert res.status_code == 201
contributor_added = res.json['data']['embeds']['users']['data']['id']
assert contributor_added == primary_user._id
def test_add_unconfirmed_user_by_guid(
self, app, user, preprint_published, url_published):
unconfirmed_user = UnconfirmedUserFactory()
payload = {
'data': {
'type': 'contributors',
'attributes': {},
'relationships': {
'users': {
'data': {
'type': 'users',
'id': unconfirmed_user._id
}
}
}
}
}
res = app.post_json_api(
url_published, payload,
auth=user.auth, expect_errors=True)
assert res.status_code == 404
# if adding unregistered contrib by guid, fullname must be supplied
assert (
res.json['errors'][0]['detail'] ==
'Cannot add unconfirmed user {} to resource {}. You need to provide a full_name.'
.format(unconfirmed_user._id, preprint_published._id))
payload['data']['attributes']['full_name'] = '<NAME>'
res = app.post_json_api(
url_published, payload,
auth=user.auth, expect_errors=True)
assert res.status_code == 201
assert res.json['data']['attributes']['unregistered_contributor'] == '<NAME>'
@pytest.mark.django_db
class TestPreprintContributorCreateValidation(NodeCRUDTestCase):
    """Direct tests of NodeContributorsCreateSerializer.validate_data for
    the allowed combinations of user_id / full_name / email."""

    @pytest.fixture()
    def validate_data(self):
        # Unbound validator; each case supplies its own serializer instance.
        return NodeContributorsCreateSerializer.validate_data

    def test_add_contributor_validation(self, preprint_published, validate_data):
        serializer = NodeContributorsCreateSerializer

        # user_id alone: valid
        validate_data(serializer(), preprint_published, user_id='abcde')
        # user_id + full_name: valid
        validate_data(serializer(), preprint_published,
                      user_id='abcde', full_name='Kanye')
        # user_id + email: invalid
        with pytest.raises(exceptions.ValidationError):
            validate_data(serializer(), preprint_published,
                          user_id='abcde', email='<EMAIL>')
        # user_id + full_name + email: invalid
        with pytest.raises(exceptions.ValidationError):
            validate_data(serializer(), preprint_published,
                          user_id='abcde', full_name='Kanye',
                          email='<EMAIL>')
        # full_name alone: valid
        validate_data(serializer(), preprint_published, full_name='Kanye')
        # email without full_name: invalid
        with pytest.raises(exceptions.ValidationError):
            validate_data(serializer(), preprint_published, email='<EMAIL>')
        # full_name + email: valid
        validate_data(serializer(), preprint_published,
                      full_name='Kanye', email='<EMAIL>')
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
@pytest.mark.enable_enqueue_task
class TestPreprintContributorCreateEmail(NodeCRUDTestCase):
    """Tests for the `send_email` query parameter and the emails/signals
    fired when contributors are added to (or published with) a preprint."""

    @pytest.fixture()
    def url_preprint_contribs(self, preprint_published):
        # Contributors endpoint of the published preprint.
        return '/{}preprints/{}/contributors/'.format(API_BASE, preprint_published._id)

    # send_email=false must suppress the notification entirely.
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_add_contributor_no_email_if_false(
            self, mock_mail, app, user, url_preprint_contribs):
        url = '{}?send_email=false'.format(url_preprint_contribs)
        payload = {
            'data': {
                'type': 'contributors',
                'attributes': {
                    'full_name': '<NAME>',
                    'email': '<EMAIL>'
                }
            }
        }
        res = app.post_json_api(url, payload, auth=user.auth)
        assert res.status_code == 201
        assert mock_mail.call_count == 0

    # 'default' is not a valid email preference for preprints.
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_add_contributor_needs_preprint_filter_to_send_email(
            self, mock_mail, app, user, user_two,
            url_preprint_contribs):
        url = '{}?send_email=default'.format(url_preprint_contribs)
        payload = {
            'data': {
                'type': 'contributors',
                'attributes': {
                },
                'relationships': {
                    'users': {
                        'data': {
                            'type': 'users',
                            'id': user_two._id
                        }
                    }
                }
            }
        }
        res = app.post_json_api(url, payload, auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'default is not a valid email preference.'
        assert mock_mail.call_count == 0

    # Registered contributor: contributor_added fires once with the
    # 'preprint' email template.
    @mock.patch('website.project.signals.contributor_added.send')
    def test_add_contributor_signal_if_preprint(
            self, mock_send, app, user, user_two, url_preprint_contribs):
        url = '{}?send_email=preprint'.format(url_preprint_contribs)
        payload = {
            'data': {
                'type': 'contributors',
                'attributes': {
                },
                'relationships': {
                    'users': {
                        'data': {
                            'type': 'users',
                            'id': user_two._id
                        }
                    }
                }
            }
        }
        res = app.post_json_api(url, payload, auth=user.auth)
        args, kwargs = mock_send.call_args
        assert res.status_code == 201
        assert mock_send.call_count == 1
        assert 'preprint' == kwargs['email_template']

    # Unregistered contributor with an email address gets one invitation.
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_add_unregistered_contributor_sends_email(
            self, mock_mail, app, user, url_preprint_contribs):
        url = '{}?send_email=preprint'.format(url_preprint_contribs)
        payload = {
            'data': {
                'type': 'contributors',
                'attributes': {
                    'full_name': '<NAME>',
                    'email': '<EMAIL>'
                }
            }
        }
        res = app.post_json_api(url, payload, auth=user.auth)
        assert res.status_code == 201
        assert mock_mail.call_count == 1

    # Unregistered contributor: unreg_contributor_added fires with the
    # 'preprint' template.
    @mock.patch('website.project.signals.unreg_contributor_added.send')
    def test_add_unregistered_contributor_signal_if_preprint(
            self, mock_send, app, user, url_preprint_contribs):
        url = '{}?send_email=preprint'.format(url_preprint_contribs)
        payload = {
            'data': {
                'type': 'contributors',
                'attributes': {
                    'full_name': '<NAME>',
                    'email': '<EMAIL>'
                }
            }
        }
        res = app.post_json_api(url, payload, auth=user.auth)
        args, kwargs = mock_send.call_args
        assert res.status_code == 201
        assert 'preprint' == kwargs['email_template']
        assert mock_send.call_count == 1

    # 'true' is rejected as an email preference; nothing is sent.
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_add_contributor_invalid_send_email_param(
            self, mock_mail, app, user, url_preprint_contribs):
        url = '{}?send_email=true'.format(url_preprint_contribs)
        payload = {
            'data': {
                'type': 'contributors',
                'attributes': {
                    'full_name': '<NAME>',
                    'email': '<EMAIL>'
                }
            }
        }
        res = app.post_json_api(
            url, payload, auth=user.auth,
            expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'true is not a valid email preference.'
        assert mock_mail.call_count == 0

    # No email on the unregistered contributor: the signal still fires
    # but no message is sent.
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_add_unregistered_contributor_without_email_no_email(
            self, mock_mail, app, user, url_preprint_contribs):
        url = '{}?send_email=preprint'.format(url_preprint_contribs)
        payload = {
            'data': {
                'type': 'contributors',
                'attributes': {
                    'full_name': '<NAME>',
                }
            }
        }
        with capture_signals() as mock_signal:
            res = app.post_json_api(url, payload, auth=user.auth)
        assert contributor_added in mock_signal.signals_sent()
        assert res.status_code == 201
        assert mock_mail.call_count == 0

    # Publishing an unpublished preprint notifies its contributors.
    @mock.patch('framework.auth.views.mails.send_mail')
    @mock.patch('osf.models.preprint.update_or_enqueue_on_preprint_updated')
    def test_publishing_preprint_sends_emails_to_contributors(
            self, mock_update, mock_mail, app, user, url_preprint_contribs, preprint_unpublished):
        url = '/{}preprints/{}/'.format(API_BASE, preprint_unpublished._id)
        user_two = AuthUserFactory()
        preprint_unpublished.add_contributor(user_two, permissions=permissions.WRITE, save=True)
        payload = {
            'data': {
                'id': preprint_unpublished._id,
                'type': 'preprints',
                'attributes': {
                    'is_published': True
                }
            }
        }
        with capture_signals() as mock_signal:
            res = app.patch_json_api(url, payload, auth=user.auth)
        assert res.status_code == 200
        assert contributor_added in mock_signal.signals_sent()
        assert mock_update.called

    # Omitting send_email still uses the 'preprint' template.
    @mock.patch('website.project.signals.unreg_contributor_added.send')
    def test_contributor_added_signal_not_specified(
            self, mock_send, app, user, url_preprint_contribs):
        payload = {
            'data': {
                'type': 'contributors',
                'attributes': {
                    'full_name': '<NAME>',
                    'email': '<EMAIL>'
                }
            }
        }
        res = app.post_json_api(url_preprint_contribs, payload, auth=user.auth)
        args, kwargs = mock_send.call_args
        assert res.status_code == 201
        assert 'preprint' == kwargs['email_template']
        assert mock_send.call_count == 1

    # Invitations are deferred while the preprint is unpublished.
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_contributor_added_not_sent_if_unpublished(
            self, mock_mail, app, user, preprint_unpublished):
        url = '/{}preprints/{}/contributors/?send_email=preprint'.format(API_BASE, preprint_unpublished._id)
        payload = {
            'data': {
                'type': 'contributors',
                'attributes': {
                    'full_name': '<NAME>',
                    'email': '<EMAIL>'
                }
            }
        }
        res = app.post_json_api(url, payload, auth=user.auth)
        assert res.status_code == 201
        assert mock_mail.call_count == 0
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
class TestPreprintContributorBulkCreate(NodeCRUDTestCase):
    @pytest.fixture()
    def user_three(self):
        # Third registered user; the target of payload_two.
        return AuthUserFactory()
    @pytest.fixture()
    def url_published(self, preprint_published):
        # Contributors endpoint for the published preprint; emails are
        # suppressed so bulk tests don't trigger notifications.
        return '/{}preprints/{}/contributors/?send_email=false'.format(
            API_BASE, preprint_published._id)
    @pytest.fixture()
    def url_unpublished(self, preprint_unpublished):
        # Contributors endpoint for the unpublished preprint; emails are
        # suppressed so bulk tests don't trigger notifications.
        return '/{}preprints/{}/contributors/?send_email=false'.format(
            API_BASE, preprint_unpublished._id)
    @pytest.fixture()
    def payload_one(self, user_two):
        # Bulk item: add user_two as a bibliographic admin contributor.
        return {
            'type': 'contributors',
            'attributes': {
                'bibliographic': True,
                'permission': permissions.ADMIN
            },
            'relationships': {
                'users': {
                    'data': {
                        'id': user_two._id,
                        'type': 'users'
                    }
                }
            }
        }
    @pytest.fixture()
    def payload_two(self, user_three):
        # Bulk item: add user_three as a non-bibliographic read-only
        # contributor.
        return {
            'type': 'contributors',
            'attributes': {
                'bibliographic': False,
                'permission': permissions.READ
            },
            'relationships': {
                'users': {
                    'data': {
                        'id': user_three._id,
                        'type': 'users'
                    }
                }
            }
        }
def test_preprint_contributor_bulk_create_contributor_exists(
self, app, user, user_two, preprint_published,
payload_one, payload_two, url_published):
preprint_published.add_contributor(
user_two,
permissions=permissions.READ,
visible=True, save=True)
res = app.post_json_api(
url_published,
{'data': [payload_two, payload_one]},
auth=user.auth,
expect_errors=True, bulk=True)
assert res.status_code == 400
assert 'is already a contributor' in res.json['errors'][0]['detail']
res = app.get(url_published, auth=user.auth)
assert len(res.json['data']) == 2
    def test_preprint_contributor_bulk_create_errors(
            self, app, user, user_two, preprint_unpublished,
            payload_one, payload_two, url_published, url_unpublished):
        """Bulk-create failure modes: empty body, logged-out requests, and
        non-contributor / read-only permission errors. In every case the
        contributor list must remain just the creator (length 1)."""
        # test_bulk_create_contributors_blank_request
        res = app.post_json_api(
            url_published, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        # test_preprint_contributor_bulk_create_logged_out_published_preprint
        res = app.post_json_api(
            url_published,
            {'data': [payload_one, payload_two]},
            expect_errors=True, bulk=True)
        assert res.status_code == 401
        res = app.get(url_published, auth=user.auth)
        assert len(res.json['data']) == 1
        # test_preprint_contributor_bulk_create_logged_out_unpublished_preprint
        res = app.post_json_api(
            url_unpublished,
            {'data': [payload_one, payload_two]},
            expect_errors=True, bulk=True)
        assert res.status_code == 401
        res = app.get(url_unpublished, auth=user.auth)
        assert len(res.json['data']) == 1
        # test_preprint_contributor_bulk_create_logged_in_non_contrib_unpublished_preprint
        res = app.post_json_api(url_unpublished, {'data': [payload_one, payload_two]},
                                auth=user_two.auth, expect_errors=True, bulk=True)
        assert res.status_code == 403
        res = app.get(url_published, auth=user.auth)
        assert len(res.json['data']) == 1
        # test_preprint_contributor_bulk_create_logged_in_read_only_contrib_unpublished_preprint
        preprint_unpublished.add_contributor(
            user_two, permissions=permissions.READ, save=True)
        res = app.post_json_api(
            url_unpublished,
            {'data': [payload_two]},
            auth=user_two.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 403
        res = app.get(url_published, auth=user.auth)
        assert len(res.json['data']) == 1
def test_preprint_contributor_bulk_create_logged_in_published_preprint(
self, app, user, payload_one, payload_two, url_published):
res = app.post_json_api(
url_published,
{'data': [payload_one, payload_two]},
auth=user.auth, bulk=True)
assert res.status_code == 201
assert_equals([res.json['data'][0]['attributes']['bibliographic'],
res.json['data'][1]['attributes']['bibliographic']], [True, False])
assert_equals([res.json['data'][0]['attributes']['permission'],
res.json['data'][1]['attributes']['permission']], [permissions.ADMIN, permissions.READ])
assert res.content_type == | |
"""Module contains base class for prompts.
BaseSimplePrompt ← InputPrompt,
↑ ↑
↑ SecretPrompt ...
↑
BaseComplexPrompt ← FuzzyPrompt
↑
BaseListPrompt ← ListPrompt, ExpandPrompt ...
"""
import os
import re
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, List, NamedTuple, Tuple, Union
from prompt_toolkit.application import Application
from prompt_toolkit.enums import EditingMode
from prompt_toolkit.filters import IsDone
from prompt_toolkit.filters.base import Condition, FilterOrBool
from prompt_toolkit.key_binding.key_bindings import KeyBindings, KeyHandlerCallable
from prompt_toolkit.keys import Keys
from prompt_toolkit.layout.containers import ConditionalContainer, HSplit, Window
from prompt_toolkit.layout.controls import FormattedTextControl
from prompt_toolkit.layout.dimension import Dimension, LayoutDimension
from prompt_toolkit.layout.layout import Layout
from prompt_toolkit.styles.style import Style
from prompt_toolkit.validation import ValidationError, Validator
from InquirerPy.enum import INQUIRERPY_KEYBOARD_INTERRUPT
from InquirerPy.exceptions import InvalidArgument, RequiredKeyNotFound
from InquirerPy.separator import Separator
from InquirerPy.utils import InquirerPyStyle, SessionResult, calculate_height, get_style
__all__ = [
"BaseSimplePrompt",
"BaseComplexPrompt",
"BaseListPrompt",
"InquirerPyUIControl",
]
class BaseSimplePrompt(ABC):
    """The base class for simple prompts.

    Inherit this class to create a simple prompt that leverage `prompt_toolkit`
    PromptSession.

    Note: the PromptSession is not initialised in the constructor, require
    a call of `self.session = PromptSession(...)`.

    :param message: the question message to display
    :type message: Union[str, Callable[[SessionResult], str]]
    :param style: the style dictionary to apply
    :type style: InquirerPyStyle
    :param vi_mode: use vi keybindings for the prompt instead of emacs
    :type vi_mode: bool
    :param qmark: the custom qmark to display infront of the question
    :type qmark: str
    :param validate: a callable or Validator instance to validate user input
    :type validate: Union[Callable[[Any], bool], Validator]
    :param invalid_message: message to display when input is invalid
    :type invalid_message: str
    :param transformer: a callable to transform the result, this is visual effect only
    :type transformer: Callable[[Any], Any]
    :param filter: a callable to filter the result, updating the user input before returning the result
    :type filter: Callable[[Any], Any]
    :param session_result: the current session result, this is used by callable message and choices
        to generate dynamic values. If using alternate syntax, skip this value.
    :type session_result: SessionResult
    :param default: default value of the prompt; may be a callable taking the session result
    :type default: Any
    """

    def __init__(
        self,
        message: Union[str, Callable[[SessionResult], str]],
        style: InquirerPyStyle = None,
        vi_mode: bool = False,
        qmark: str = "?",
        validate: Union[Callable[[Any], bool], Validator] = None,
        invalid_message: str = "Invalid input",
        transformer: Callable[[Any], Any] = None,
        filter: Callable[[Any], Any] = None,
        session_result: SessionResult = None,
        default: Any = "",
    ) -> None:
        """Construct the base class for simple prompts."""
        # Results of previously answered prompts; used to resolve callable
        # `message`/`default` into concrete values.
        self._result = session_result or {}
        self._message = message if not isinstance(message, Callable) else message(self._result)  # type: ignore
        self._default = (
            default if not isinstance(default, Callable) else default(self._result)
        )
        # Fall back to the package default style when none is supplied.
        self._style = Style.from_dict(style.dict if style else get_style().dict)
        self._qmark = qmark
        # `answered`/`result` drive the dynamic prompt-message rendering.
        self._status = {"answered": False, "result": None}
        self._kb = KeyBindings()
        self._lexer = "class:input"
        self._transformer = transformer
        self._filter = filter
        # NOTE: any non-empty INQUIRERPY_VI_MODE env value enables vi mode,
        # because bool() of a non-empty string is True.
        self._editing_mode = (
            EditingMode.VI
            if vi_mode or bool(os.getenv("INQUIRERPY_VI_MODE", False))
            else EditingMode.EMACS
        )
        if isinstance(validate, Validator):
            self._validator = validate
        else:
            # Wrap a plain callable (or an accept-everything default) in a
            # prompt_toolkit Validator.
            self._validator = Validator.from_callable(
                validate if validate else lambda _: True,
                invalid_message,
                move_cursor_to_end=True,
            )

        @self._kb.add("c-c")
        def _(event) -> None:
            # Treat ctrl-c as an answered prompt carrying a sentinel result so
            # the resolver can distinguish interruption from a real answer.
            self.status["answered"] = True
            self.status["result"] = INQUIRERPY_KEYBOARD_INTERRUPT
            event.app.exit(result=INQUIRERPY_KEYBOARD_INTERRUPT)

    @property
    def status(self) -> Dict[str, Any]:
        """Get status value of the prompt."""
        return self._status

    @status.setter
    def status(self, value) -> None:
        """Set status value of the prompt."""
        self._status = value

    def register_kb(
        self, *keys: Union[Keys, str], filter: FilterOrBool = True
    ) -> Callable[[KeyHandlerCallable], KeyHandlerCallable]:
        """Decorate keybinding registration function.

        Format all alt related keybindings.

        Due to `prompt_toolkit` doesn't process alt related keybindings,
        it requires alt-ANY to "escape" + "ANY".
        Check a list of keys argument if they are alt related, change
        them to escape.

        :param keys: keys to bind into the keybindings
        :type keys: Union[Keys, str]
        :param filter: condition of whether this keybinding should be active
        :type filter: FilterOrBool
        :return: a decorator that should be applied to the function that's intended
            to be active when the keys being pressed
        :rtype: Callable[[KeyHandlerCallable], KeyHandlerCallable]
        """
        alt_pattern = re.compile(r"^alt-(.*)")

        def decorator(func: KeyHandlerCallable) -> KeyHandlerCallable:
            formatted_keys = []
            for key in keys:
                match = alt_pattern.match(key)
                if match:
                    # prompt_toolkit represents alt-<key> as escape + <key>.
                    formatted_keys.append("escape")
                    formatted_keys.append(match.group(1))
                else:
                    formatted_keys.append(key)

            @self._kb.add(*formatted_keys, filter=filter)
            def executable(event) -> None:
                func(event)

            return executable

        return decorator

    @abstractmethod
    def _get_prompt_message(
        self, pre_answer: Tuple[str, str], post_answer: Tuple[str, str]
    ) -> List[Tuple[str, str]]:
        """Return the formatted text to display in the prompt.

        Leveraging the nature of Dict in python, we can dynamically update the prompt
        message of the PromptSession.

        This is useful to format/customize user input for better visual.

        :param pre_answer: the information to display before answering the question
        :type pre_answer: Tuple[str, str]
        :param post_answer: the information to display after answering the question
        :type post_answer: Tuple[str, str]
        :return: formatted text that's ready to be consumed by PromptSession
        :rtype: List[Tuple[str, str]]
        """
        display_message = []
        if self.status["result"] == INQUIRERPY_KEYBOARD_INTERRUPT:
            # Interrupted prompts render the whole line in "skipped" style.
            display_message.append(
                ("class:skipped", "%s %s " % (self._qmark, self._message))
            )
        else:
            display_message.append(("class:questionmark", self._qmark))
            display_message.append(("class:question", " %s" % self._message))
            if self.status["answered"]:
                # `transformer` only changes what is displayed, not the
                # actual stored result.
                display_message.append(
                    post_answer
                    if not self._transformer
                    else (
                        "class:answer",
                        " %s" % self._transformer(self.status["result"]),
                    )
                )
            else:
                display_message.append(pre_answer)
        return display_message

    @abstractmethod
    def execute(self) -> Any:
        """Abstractmethod to enforce a execute function is implemented for easier management.

        All prompt instance require a execute call to initialised the `PromptSession` or `Application`.
        This is being called in the resolver.
        """
        pass
class InquirerPyUIControl(FormattedTextControl):
"""A UIControl class intended to be consumed by `prompt_toolkit` window.
Dynamically adapt to user input and update formatted text.
:param choices: list of choices to display as the content
:type choices: Union[Callable[[SessionResult], List[Any]], List[Any]],
:param default: default value, will impact the cursor position
:type default: Any
"""
def __init__(
    self,
    choices: Union[Callable[[SessionResult], List[Any]], List[Any]],
    default: Any = None,
    session_result: SessionResult = None,
) -> None:
    """Initialise choices and construct a FormattedTextControl object.

    :param choices: list of choices to display, or a callable producing
        them from the session result (resolved lazily after first render)
    :param default: default value, will impact the cursor position
    :param session_result: results of previously answered prompts, used to
        resolve callable ``choices``/``default``
    """
    self._session_result = session_result or {}
    self._selected_choice_index: int = 0
    self._choice_func = None
    self._loading = False
    self._raw_choices = []
    self._default = (
        default
        if not isinstance(default, Callable)
        else default(self._session_result)
    )
    if isinstance(choices, Callable):
        # Lazy choices: leave the list empty and mark the control as
        # loading; `_retrieve_choices` resolves them after render.
        # (Fixed: `self._loading = True` was redundantly assigned twice.)
        self._loading = True
        self._choices = []
        self._choice_func = choices
    else:
        self._raw_choices = choices
        self._choices = self._get_choices(choices, self._default)  # type: ignore
        self._safety_check()
    self._format_choices()
    super().__init__(self._get_formatted_choices)
def _retrieve_choices(self) -> None:
    """Retrieve the callable choices and format them.

    Should be called in the `after_render` call in `Application`.
    """
    # `_choice_func` is only set when choices were provided as a callable;
    # resolve it against the session result gathered so far.
    self._raw_choices = self._choice_func(self._session_result)  # type: ignore
    self.choices = self._get_choices(self._raw_choices, self._default)
    self._loading = False
    self._safety_check()
    self._format_choices()
def _get_choices(self, choices: List[Any], default: Any) -> List[Dict[str, Any]]:
    """Normalise raw user-supplied choices into dictionaries.

    Every resulting entry carries a "name" (display string), a "value"
    (actual value) and an "enabled" flag (initially False). The highlighted
    index is moved to the choice matching ``default``, and bumped off any
    separator it would land on.

    :param choices: list of choices to display
    :type choices: List[Union[str, Dict[str, Any]]]
    :param default: default value, this affect selected_choice_index
    :type default: Any
    :return: formatted choices
    :rtype: List[Dict[str, Any]]
    """
    normalised: List[Dict[str, Any]] = []
    try:
        for position, raw in enumerate(choices):
            if isinstance(raw, dict):
                if raw["value"] == default:
                    self.selected_choice_index = position
                entry = {
                    "name": str(raw["name"]),
                    "value": raw["value"],
                    "enabled": False,
                }
            elif isinstance(raw, Separator):
                # Never leave the cursor resting on a separator row.
                if self.selected_choice_index == position:
                    self.selected_choice_index = (position + 1) % len(choices)
                entry = {"name": str(raw), "value": raw, "enabled": False}
            else:
                if raw == default:
                    self.selected_choice_index = position
                entry = {"name": str(raw), "value": raw, "enabled": False}
            normalised.append(entry)
    except KeyError:
        raise RequiredKeyNotFound(
            "dictionary choice require a name key and a value key."
        )
    return normalised
@property
def selected_choice_index(self) -> int:
    """Get current highlighted index."""
    return self._selected_choice_index

@selected_choice_index.setter
def selected_choice_index(self, value) -> None:
    """Set index to highlight."""
    self._selected_choice_index = value
@property
def choices(self) -> List[Dict[str, Any]]:
    """Get all processed choices (dicts with name/value/enabled keys)."""
    return self._choices

@choices.setter
def choices(self, value) -> None:
    """Set processed choices."""
    self._choices = value
def _safety_check(self) -> None:
    """Validate processed choices: must be non-empty and not separators-only.

    :raises InvalidArgument: when choices is empty or contains only separators.
    """
    if not self.choices:
        raise InvalidArgument("choices cannot be empty.")
    # At least one entry must be selectable (i.e. not a Separator).
    has_real_choice = any(
        not isinstance(entry["value"], Separator) for entry in self.choices
    )
    if not has_real_choice:
        raise InvalidArgument(
            "choices should contain content other than separator."
        )
def _get_formatted_choices(self) -> List[Tuple[str, str]]:
    """Build the formatted-text representation of all choices.

    The highlighted choice is rendered via `_get_hover_text`, everything
    else via `_get_normal_text`; rows are newline separated with no
    trailing newline.

    :return: a list of (style, text) fragments
    :rtype: List[Tuple[str, str]]
    """
    fragments: List[Tuple[str, str]] = []
    last = len(self.choices) - 1
    for position, entry in enumerate(self.choices):
        if position == self.selected_choice_index:
            fragments += self._get_hover_text(entry)
        else:
            fragments += self._get_normal_text(entry)
        if position != last:
            fragments.append(("", "\n"))
    return fragments
@abstractmethod
def _format_choices(self) -> None:
    """Perform post processing on the choices.

    Customise the choices after `self._get_choices` call.
    Subclasses may e.g. annotate or re-style individual choice entries.
    """
    pass
@abstractmethod
def _get_hover_text(self, choice) -> List[Tuple[str, str]]:
"""Generate the formatted text for hovered choice.
:return: list of formatted | |
client.get_channel(channel).send('< ' + bossData[i][0] + ' 카톡 보내기 켬>', tts=False)
################ 전체 카톡 끔 ################
if message.content == '!카톡끔':
basicSetting[9] = '0'
InitkatalkData[2] = 'kakaoOnOff = ' + basicSetting[9] +'\n'
tmp_katalkData = open('test_setting.ini', 'w', encoding = 'utf-8')
tmp_katalkData.writelines(InitkatalkData)
tmp_katalkData.close()
await client.get_channel(channel).send('<카톡 보내기 끔>', tts=False)
if message.content == '!카톡켬':
basicSetting[9] = '1'
InitkatalkData[2] = 'kakaoOnOff = ' + basicSetting[9] +'\n'
tmp_katalkData = open('test_setting.ini', 'w', encoding = 'utf-8')
tmp_katalkData.writelines(InitkatalkData)
tmp_katalkData.close()
await client.get_channel(channel).send('<카톡 보내기 켬>', tts=False)
################ ?????????????? ################
if message.content.startswith('!오빠'):
await PlaySound(voice_client1, './sound/오빠.mp3')
if message.content.startswith('!언니'):
await PlaySound(voice_client1, './sound/언니.mp3')
if message.content.startswith('!형'):
await PlaySound(voice_client1, './sound/형.mp3')
################ 분배 결과 출력 ################
if message.content.startswith('!분배'):
separate_money = []
separate_money = message.content[4:].split(" ")
num_sep = int(separate_money[0])
cal_tax1 = int(float(separate_money[1])*0.05)
real_money = int(int(separate_money[1]) - cal_tax1)
cal_tax2 = int(real_money/num_sep) - int(float(int(real_money/num_sep))*0.95)
if num_sep == 0 :
await client.get_channel(channel).send('분배 인원이 0입니다. 재입력 해주세요.', tts=False)
else :
await client.get_channel(channel).send('1차세금 : ' + str(cal_tax1) + '\n1차 수령액 : ' + str(real_money) + '\n분배자 거래소등록금액 : ' + str(int(real_money/num_sep)) + '\n2차세금 : ' + str(cal_tax2) + '\n인당 실수령액 : ' + str(int(float(int(real_money/num_sep))*0.95)), tts=False)
################ 사다리 결과 출력 ################
if message.content.startswith('!사다리'):
ladder = []
ladder = message.content[5:].split(" ")
num_cong = int(ladder[0])
del(ladder[0])
await LadderFunc(num_cong, ladder, client.get_channel(channel))
################ 보탐봇 메뉴 출력 ################
if message.content == '!메뉴':
embed = discord.Embed(
title = "----- 메뉴 -----",
description= '```!설정확인\n!카톡확인\n!채널확인\n!채널이동 [채널명]\n!소환\n!불러오기\n!초기화\n!재시작\n!명치\n!미예약\n!분배 [인원] [금액]\n!사다리 [뽑을인원수] [아이디1] [아이디2] ...\n!보스일괄 00:00 또는 !보스일괄 0000\n!카톡켬\n!카톡끔\n!ㅂ,ㅃ,q\n!k,ㅏ (할말)\n\n[보스명]컷\n[보스명]컷 00:00 또는 [보스명]컷 0000\n[보스명]멍\n[보스명]멍 00:00 또는 [보스명]멍 0000\n[보스명]예상 00:00 또는 [보스명]예상 0000\n[보스명]카톡켬\n[보스명]카톡끔\n[보스명]삭제\n보스탐\n!카톡보스\n!보스탐\n!리젠```',
color=0xff00ff
)
embed.add_field(
name="----- 추가기능 -----",
value= '```(보스명)컷/멍/예상 (할말) : 보스시간 입력 후 빈칸 두번!! 메모 가능```'
)
await client.get_channel(channel).send( embed=embed, tts=False)
################ 미예약 보스타임 출력 ################
if message.content == '!미예약':
temp_bossTime2 = []
for i in range(bossNum):
if bossTimeString[i] == '99:99:99' :
temp_bossTime2.append(bossData[i][0])
if len(temp_bossTime2) != 0:
temp_bossTimeSTR1 = ','.join(map(str, temp_bossTime2))
temp_bossTimeSTR1 = '```fix\n' + temp_bossTimeSTR1 + '\n```'
else:
temp_bossTimeSTR1 = '``` ```'
embed = discord.Embed(
title = "----- 미예약보스 -----",
description= temp_bossTimeSTR1,
color=0x0000ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
################ 음성파일 생성 후 재생 ################
if message.content.startswith('!v') or message.content.startswith('!ㅍ'):
tmp_sayMessage = message.content
sayMessage = tmp_sayMessage[3:]
await MakeSound(message.author.display_name +'님이' + sayMessage, './sound/say')
await client.get_channel(channel).send( "```< " + msg.author.display_name + " >님이 \"" + sayMessage + "\"```", tts=False)
await PlaySound(voice_client1, './sound/say.mp3')
################ 카톡으로 메세지 보내기 ################
if message.content.startswith('!k') or message.content.startswith('!ㅏ'):
tmp_sayMessage = message.content
sayMessage = tmp_sayMessage[3:]
KakaoSendMSG(basicSetting[8], message.author.display_name + ' : ' + sayMessage, basicSetting[9], '1')
################ 보탐봇 재시작 ################
if message.content == '!재시작':
if basicSetting[2] != '0':
for i in range(bossNum):
if bossMungFlag[i] == True:
bossTimeString[i] = tmp_bossTime[i].strftime('%H:%M:%S')
bossDateString[i] = tmp_bossTime[i].strftime('%Y-%m-%d')
await dbSave()
await client.get_channel(channel).send( '<보탐봇 재시작 중! 접속완료 후 명령어 입력 해주세요!>', tts=False)
os.system('restart.bat')
#sys.exit()
################ 보탐봇 음성채널 소환 ################
if message.content == '!소환':
if message.author.voice == None:
await client.get_channel(channel).send('음성채널에 먼저 들어가주세요.', tts=False)
else:
voice_channel = message.author.voice.channel
print ('< ', basicSetting[6], ' >')
print ('> ', client.get_channel(voice_channel.id).name, ' <')
if basicSetting[6] == "":
inidata_voice = open('test_setting.ini', 'r', encoding = 'utf-8')
inputData_voice = inidata_voice.readlines()
inidata_voice.close()
inidata_voice = open('test_setting.ini', 'w', encoding = 'utf-8')
for i in range(len(inputData_voice)):
if inputData_voice[i] == 'voicechannel = \n':
inputData_voice[i] = 'voicechannel = ' + str(voice_channel.id) + '\n'
basicSetting[6] = int(voice_channel.id)
#print ('======', inputData_voice[i])
inidata_voice.writelines(inputData_voice)
inidata_voice.close()
elif basicSetting[6] != int(voice_channel.id):
inidata_voice = open('test_setting.ini', 'r', encoding = 'utf-8')
inputData_voice = inidata_voice.readlines()
inidata_voice.close()
inidata_voice = open('test_setting.ini', 'w', encoding = 'utf-8')
for i in range(len(inputData_voice)):
if inputData_voice[i] == 'voicechannel = ' + str(basicSetting[6]) + '\n':
inputData_voice[i] = 'voicechannel = ' + str(voice_channel.id) + '\n'
basicSetting[6] = int(voice_channel.id)
#print ('+++++++', inputData_voice[i])
inidata_voice.writelines(inputData_voice)
inidata_voice.close()
await JointheVC(voice_channel, channel)
await client.get_channel(channel).send( '< 음성채널 [' + client.get_channel(voice_channel.id).name + '] 접속완료>', tts=False)
################ 저장된 정보 초기화 ################
if message.content == '!초기화':
basicSetting = []
bossData = []
bossTime = []
tmp_bossTime = []
fixed_bossTime = []
bossTimeString = []
bossDateString = []
tmp_bossTimeString = []
tmp_bossDateString = []
bossFlag = []
bossFlag0 = []
fixed_bossFlag = []
fixed_bossFlag0 = []
bossMungFlag = []
bossMungCnt = []
katalkData = []
InitkatalkData = []
indexBossname = []
init()
await dbSave()
await client.get_channel(channel).send( '<초기화 완료>', tts=False)
print ("<초기화 완료>")
################ 보스타임 일괄 설정 ################
if message.content.startswith('!보스일괄'):
for i in range(bossNum):
tmp_msg = '!보스일괄'
if len(hello) > len(tmp_msg) + 3 :
if hello.find(':') != -1 :
chkpos = hello.find(':')
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos+1:chkpos+3]
now2 = datetime.datetime.now()
tmp_now = datetime.datetime.now()
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
chkpos = len(hello)-2
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos:chkpos+2]
now2 = datetime.datetime.now()
tmp_now = datetime.datetime.now()
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
now2 = datetime.datetime.now()
tmp_now = now2
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = 1
if tmp_now > now2 :
tmp_now = tmp_now + datetime.timedelta(days=int(-1))
if tmp_now < now2 :
deltaTime = datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
while now2 > tmp_now :
tmp_now = tmp_now + deltaTime
bossMungCnt[i] = bossMungCnt[i] + 1
now2 = tmp_now
bossMungCnt[i] = bossMungCnt[i] - 1
else :
now2 = now2 + datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
tmp_bossTime[i] = bossTime[i] = nextTime = now2
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
await dbSave()
await dbLoad()
await dbSave()
await client.get_channel(channel).send( '<보스 일괄 입력 완료>', tts=False)
print ("<보스 일괄 입력 완료>")
################ 보탐봇 기본 설정확인 ################
if message.content == '!설정확인':
setting_val = '보탐봇 재시작 설정시간 : ' + basicSetting[4] + '시 ' + basicSetting[5] + '분\n' + '보스젠알림시간1 : ' + basicSetting[1] + ' 분 전\n' + '보스젠알림시간2 : ' + basicSetting[3] + ' 분 전\n' + '보스멍확인시간 : ' + basicSetting[2] + ' 분 후\n'
embed = discord.Embed(
title = "----- 설정내용 -----",
description= setting_val,
color=0xff00ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
print ('보스젠알림시간1 : ', basicSetting[1])
print ('보스젠알림시간2 : ', basicSetting[3])
print ('보스멍확인시간 : ', basicSetting[2])
################ 카톡 설정 확인 ################
if message.content == '!카톡확인':
katalkInformation = ''
if basicSetting[9] == '0' :
katalkInformation = '전체카톡 : 꺼짐\n'
else :
katalkInformation = '전체카톡 : 켜짐\n'
katalkInformation += '---------------------\n'
for i in range(bossNum):
for j in range(bossNum):
if bossTimeString[i] and bossTimeString[j] != '99:99:99':
if bossTimeString[i] == bossTimeString[j] and i != j:
tmp_time1 = bossTimeString[j][:6]
tmp_time2 = (int(bossTimeString[j][6:]) + 1)%100
if tmp_time2 < 10 :
tmp_time22 = '0' + str(tmp_time2)
elif tmp_time2 == 60 :
tmp_time22 = '00'
else :
tmp_time22 = str(tmp_time2)
bossTimeString[j] = tmp_time1 + tmp_time22
datelist2 = bossTime
datelist = list(set(datelist2))
for timestring in sorted(datelist):
for i in range(bossNum):
if timestring == bossTime[i]:
if bossTimeString[i] != '99:99:99' :
if bossData[i][6] == '0':
katalkInformation += bossData[i][0] + " 카톡 : 꺼짐\n"
else :
katalkInformation += bossData[i][0] + " 카톡 : 켜짐\n"
embed = discord.Embed(
title = "----- 카톡설정내용 -----",
description= katalkInformation,
color=0xff00ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
################ my_bot.db에 저장된 보스타임 불러오기 ################
if message.content == '!불러오기':
await dbLoad()
if LoadChk == 0:
await client.get_channel(channel).send( '<불러오기 완료>', tts=False)
else:
await client.get_channel(channel).send( '<보스타임 정보가 없습니다.>', tts=False)
################ 가장 근접한 보스타임 출력 ################
if message.content == '!ㅂ' or message.content == '!q' or message.content == '!ㅃ' or message.content == '!Q':
await dbLoad()
checkTime = datetime.datetime.now() + datetime.timedelta(days=1)
sorted_datelist = []
datelist = bossTime
tmp_sorted_datelist = sorted(datelist)
for i in range(len(tmp_sorted_datelist)):
if checkTime > tmp_sorted_datelist[i]:
sorted_datelist.append(tmp_sorted_datelist[i])
if len(sorted_datelist) == 0:
await client.get_channel(channel).send( '<보스타임 정보가 없습니다.>', tts=False)
else :
result_lefttime = ''
if len(sorted_datelist) > int(basicSetting[11]):
for j in range(int(basicSetting[11])):
for i in range(bossNum):
if sorted_datelist[j] == bossTime[i]:
leftTime = bossTime[i] - datetime.datetime.now()
total_seconds = int(leftTime.total_seconds())
hours, remainder = divmod(total_seconds,60*60)
minutes, seconds = divmod(remainder,60)
result_lefttime += '다음 ' + bossData[i][0] + '탐까지 %02d:%02d:%02d 남았습니다. ' % (hours,minutes,seconds) + '[' + bossTimeString[i] + ']\n'
else :
for j in range(len(sorted_datelist)):
for i in range(bossNum):
if sorted_datelist[j] == bossTime[i]:
leftTime = bossTime[i] - datetime.datetime.now()
total_seconds = int(leftTime.total_seconds())
hours, remainder = divmod(total_seconds,60*60)
minutes, seconds = divmod(remainder,60)
result_lefttime += '다음 ' + bossData[i][0] + '탐까지 %02d:%02d:%02d 남았습니다. ' % (hours,minutes,seconds) + '[' + bossTimeString[i] + ']\n'
#result_lefttime += bossData[i][0] + '탐[' + bossTimeString[i] + ']까지 ' + '%02d:%02d:%02d 남았습니다.\n' % (hours,minutes,seconds)
embed = discord.Embed(
description= result_lefttime,
color=0xff0000
)
await client.get_channel(channel).send( embed=embed, tts=False)
datelist = []
################ 보스타임 출력 ################
if message.content == '보스탐' or message.content == '/?' or message.content == '/보스' :
datelist = []
datelist2 = []
temp_bossTime1 = []
ouput_bossData = []
aa = []
for i in range(bossNum):
if bossMungFlag[i] == True :
datelist2.append(tmp_bossTime[i])
else :
datelist2.append(bossTime[i])
for i in range(fixed_bossNum):
if fixed_bossTime[i] < datetime.datetime.now() + datetime.timedelta(hours= 3):
datelist2.append(fixed_bossTime[i])
datelist = list(set(datelist2))
for i in range(bossNum):
if bossTimeString[i] == '99:99:99' and bossMungFlag[i] != True :
temp_bossTime1.append(bossData[i][0])
else :
aa.append(bossData[i][0]) #output_bossData[0] : 보스명
if bossMungFlag[i] == True :
aa.append(tmp_bossTime[i]) #output_bossData[1] : 시간
aa.append(tmp_bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : 시간(00:00:00)
aa.append('-') #output_bossData[3] : -
else :
aa.append(bossTime[i]) #output_bossData[1] : 시간
aa.append(bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : 시간(00:00:00)
aa.append('+') #output_bossData[3] : +
aa.append(bossData[i][2]) #output_bossData[4] : 멍/미입력 보스
aa.append(bossMungCnt[i]) #output_bossData[5] : 멍/미입력횟수
aa.append(bossData[i][7]) #output_bossData[6] : 메세지
ouput_bossData.append(aa)
aa = []
for i in range(fixed_bossNum):
aa.append(fixed_bossData[i][0]) #output_bossData[0] : 보스명
aa.append(fixed_bossTime[i]) #output_bossData[1] : 시간
aa.append(fixed_bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : 시간(00:00:00)
aa.append('@') #output_bossData[3] : @
aa.append(0) #output_bossData[4] : 멍/미입력 보스
aa.append(0) #output_bossData[5] : 멍/미입력횟수
aa.append("") #output_bossData[6] : 메세지
ouput_bossData.append(aa)
aa = []
if len(temp_bossTime1) != 0:
temp_bossTimeSTR1 = ','.join(map(str, temp_bossTime1))
temp_bossTimeSTR1 = '```fix\n' + temp_bossTimeSTR1 + '\n```'
else:
temp_bossTimeSTR1 = '``` ```'
information = ''
for timestring in sorted(datelist):
for i in range(len(ouput_bossData)):
if timestring == ouput_bossData[i][1]:
if ouput_bossData[i][4] == '0' :
if ouput_bossData[i][5] == 0 :
information += ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' ' + ouput_bossData[i][6] + '\n'
else :
information += ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' (미 ' + str(ouput_bossData[i][5]) + '회)' + ' ' + ouput_bossData[i][6] + '\n'
else :
if ouput_bossData[i][5] == 0 :
information += ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' ' + ouput_bossData[i][6] + '\n'
else :
information += ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' (멍 ' + str(ouput_bossData[i][5]) + '회)' + ' ' + ouput_bossData[i][6] + '\n'
if len(information) != 0:
information = "```diff\n" + information + "\n```"
else :
information = '``` ```'
embed = | |
def _recv_getGroupsForChannel(self, ctx):
    # Read the response headers, then either an exception frame or the
    # generated result struct; service exceptions and oversized responses
    # are re-raised to the caller.
    self._iprot.read_response_headers(ctx)
    _, mtype, _ = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        if x.type == TApplicationExceptionType.RESPONSE_TOO_LARGE:
            raise TTransportException(type=TTransportExceptionType.RESPONSE_TOO_LARGE, message=x.message)
        raise x
    result = getGroupsForChannel_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    if result.e is not None:
        raise result.e
    if result.success is not None:
        return result.success
    x = TApplicationException(TApplicationExceptionType.MISSING_RESULT, "getGroupsForChannel failed: unknown result")
    raise x
def getUserCreateTime(self, ctx):
    """
    Args:
        ctx: FContext
    """
    # Dispatch through the middleware-wrapped method table.
    return self._methods['getUserCreateTime']([ctx])

def _getUserCreateTime(self, ctx):
    # Plain request/response round trip without middleware.
    self._send_getUserCreateTime(ctx)
    return self._recv_getUserCreateTime(ctx)

def _send_getUserCreateTime(self, ctx):
    # Serialise the call frame; the write lock guards the shared protocol.
    oprot = self._oprot
    with self._write_lock:
        oprot.get_transport().set_timeout(ctx.timeout)
        oprot.write_request_headers(ctx)
        oprot.writeMessageBegin('getUserCreateTime', TMessageType.CALL, 0)
        args = getUserCreateTime_args()
        args.write(oprot)
        oprot.writeMessageEnd()
        oprot.get_transport().flush()

def _recv_getUserCreateTime(self, ctx):
    # Read headers, then either an exception frame or the result struct.
    self._iprot.read_response_headers(ctx)
    _, mtype, _ = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        if x.type == TApplicationExceptionType.RESPONSE_TOO_LARGE:
            raise TTransportException(type=TTransportExceptionType.RESPONSE_TOO_LARGE, message=x.message)
        raise x
    result = getUserCreateTime_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    if result.e is not None:
        raise result.e
    if result.success is not None:
        return result.success
    x = TApplicationException(TApplicationExceptionType.MISSING_RESULT, "getUserCreateTime failed: unknown result")
    raise x
def registerChannelCP(self, ctx, cpId, registerPassword):
    """
    Args:
        ctx: FContext
        cpId: string
        registerPassword: string
    """
    # Dispatch through the middleware-wrapped method table.
    return self._methods['registerChannelCP']([ctx, cpId, registerPassword])

def _registerChannelCP(self, ctx, cpId, registerPassword):
    # Plain request/response round trip without middleware.
    self._send_registerChannelCP(ctx, cpId, registerPassword)
    return self._recv_registerChannelCP(ctx)

def _send_registerChannelCP(self, ctx, cpId, registerPassword):
    # Serialise the call frame; the write lock guards the shared protocol.
    oprot = self._oprot
    with self._write_lock:
        oprot.get_transport().set_timeout(ctx.timeout)
        oprot.write_request_headers(ctx)
        oprot.writeMessageBegin('registerChannelCP', TMessageType.CALL, 0)
        args = registerChannelCP_args()
        args.cpId = cpId
        # Fixed: this line had an anonymization placeholder `<PASSWORD>`,
        # which is a syntax error; restore the actual parameter.
        args.registerPassword = registerPassword
        args.write(oprot)
        oprot.writeMessageEnd()
        oprot.get_transport().flush()

def _recv_registerChannelCP(self, ctx):
    # Read headers, then either an exception frame or the result struct.
    self._iprot.read_response_headers(ctx)
    _, mtype, _ = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        if x.type == TApplicationExceptionType.RESPONSE_TOO_LARGE:
            raise TTransportException(type=TTransportExceptionType.RESPONSE_TOO_LARGE, message=x.message)
        raise x
    result = registerChannelCP_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    if result.e is not None:
        raise result.e
    if result.success is not None:
        return result.success
    x = TApplicationException(TApplicationExceptionType.MISSING_RESULT, "registerChannelCP failed: unknown result")
    raise x
def reserveCallCreditPurchase(self, ctx, request):
    """
    Args:
        ctx: FContext
        request: CoinPurchaseReservation
    """
    # Dispatch through the middleware-wrapped method table.
    return self._methods['reserveCallCreditPurchase']([ctx, request])

def _reserveCallCreditPurchase(self, ctx, request):
    # Plain request/response round trip without middleware.
    self._send_reserveCallCreditPurchase(ctx, request)
    return self._recv_reserveCallCreditPurchase(ctx)

def _send_reserveCallCreditPurchase(self, ctx, request):
    # Serialise the call frame; the write lock guards the shared protocol.
    oprot = self._oprot
    with self._write_lock:
        oprot.get_transport().set_timeout(ctx.timeout)
        oprot.write_request_headers(ctx)
        oprot.writeMessageBegin('reserveCallCreditPurchase', TMessageType.CALL, 0)
        args = reserveCallCreditPurchase_args()
        args.request = request
        args.write(oprot)
        oprot.writeMessageEnd()
        oprot.get_transport().flush()

def _recv_reserveCallCreditPurchase(self, ctx):
    # Read headers, then either an exception frame or the result struct.
    self._iprot.read_response_headers(ctx)
    _, mtype, _ = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        if x.type == TApplicationExceptionType.RESPONSE_TOO_LARGE:
            raise TTransportException(type=TTransportExceptionType.RESPONSE_TOO_LARGE, message=x.message)
        raise x
    result = reserveCallCreditPurchase_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    if result.e is not None:
        raise result.e
    if result.success is not None:
        return result.success
    x = TApplicationException(TApplicationExceptionType.MISSING_RESULT, "reserveCallCreditPurchase failed: unknown result")
    raise x
def acquirePaidCallCurrencyExchangeRate(self, ctx, language):
    """
    Args:
        ctx: FContext
        language: string
    """
    # Dispatch through the middleware-wrapped method table.
    return self._methods['acquirePaidCallCurrencyExchangeRate']([ctx, language])

def _acquirePaidCallCurrencyExchangeRate(self, ctx, language):
    # Plain request/response round trip without middleware.
    self._send_acquirePaidCallCurrencyExchangeRate(ctx, language)
    return self._recv_acquirePaidCallCurrencyExchangeRate(ctx)

def _send_acquirePaidCallCurrencyExchangeRate(self, ctx, language):
    # Serialise the call frame; the write lock guards the shared protocol.
    oprot = self._oprot
    with self._write_lock:
        oprot.get_transport().set_timeout(ctx.timeout)
        oprot.write_request_headers(ctx)
        oprot.writeMessageBegin('acquirePaidCallCurrencyExchangeRate', TMessageType.CALL, 0)
        args = acquirePaidCallCurrencyExchangeRate_args()
        args.language = language
        args.write(oprot)
        oprot.writeMessageEnd()
        oprot.get_transport().flush()

def _recv_acquirePaidCallCurrencyExchangeRate(self, ctx):
    # Read headers, then either an exception frame or the result struct.
    self._iprot.read_response_headers(ctx)
    _, mtype, _ = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        if x.type == TApplicationExceptionType.RESPONSE_TOO_LARGE:
            raise TTransportException(type=TTransportExceptionType.RESPONSE_TOO_LARGE, message=x.message)
        raise x
    result = acquirePaidCallCurrencyExchangeRate_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    if result.e is not None:
        raise result.e
    if result.success is not None:
        return result.success
    x = TApplicationException(TApplicationExceptionType.MISSING_RESULT, "acquirePaidCallCurrencyExchangeRate failed: unknown result")
    raise x
def getRoomMemberMidsForAppPlatform(self, ctx, roomId):
    """
    Args:
        ctx: FContext
        roomId: string
    """
    # Dispatch through the middleware-wrapped method table.
    return self._methods['getRoomMemberMidsForAppPlatform']([ctx, roomId])

def _getRoomMemberMidsForAppPlatform(self, ctx, roomId):
    # Plain request/response round trip without middleware.
    self._send_getRoomMemberMidsForAppPlatform(ctx, roomId)
    return self._recv_getRoomMemberMidsForAppPlatform(ctx)

def _send_getRoomMemberMidsForAppPlatform(self, ctx, roomId):
    # Serialise the call frame; the write lock guards the shared protocol.
    oprot = self._oprot
    with self._write_lock:
        oprot.get_transport().set_timeout(ctx.timeout)
        oprot.write_request_headers(ctx)
        oprot.writeMessageBegin('getRoomMemberMidsForAppPlatform', TMessageType.CALL, 0)
        args = getRoomMemberMidsForAppPlatform_args()
        args.roomId = roomId
        args.write(oprot)
        oprot.writeMessageEnd()
        oprot.get_transport().flush()

def _recv_getRoomMemberMidsForAppPlatform(self, ctx):
    # Read headers, then either an exception frame or the result struct.
    self._iprot.read_response_headers(ctx)
    _, mtype, _ = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        if x.type == TApplicationExceptionType.RESPONSE_TOO_LARGE:
            raise TTransportException(type=TTransportExceptionType.RESPONSE_TOO_LARGE, message=x.message)
        raise x
    result = getRoomMemberMidsForAppPlatform_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    if result.e is not None:
        raise result.e
    if result.success is not None:
        return result.success
    x = TApplicationException(TApplicationExceptionType.MISSING_RESULT, "getRoomMemberMidsForAppPlatform failed: unknown result")
    raise x
def getPaidCallBalanceList(self, ctx, language):
    """
    Args:
        ctx: FContext
        language: string
    """
    # Dispatch through the middleware-wrapped method table.
    return self._methods['getPaidCallBalanceList']([ctx, language])

def _getPaidCallBalanceList(self, ctx, language):
    # Plain request/response round trip without middleware.
    self._send_getPaidCallBalanceList(ctx, language)
    return self._recv_getPaidCallBalanceList(ctx)

def _send_getPaidCallBalanceList(self, ctx, language):
    # Serialise the call frame; the write lock guards the shared protocol.
    oprot = self._oprot
    with self._write_lock:
        oprot.get_transport().set_timeout(ctx.timeout)
        oprot.write_request_headers(ctx)
        oprot.writeMessageBegin('getPaidCallBalanceList', TMessageType.CALL, 0)
        args = getPaidCallBalanceList_args()
        args.language = language
        args.write(oprot)
        oprot.writeMessageEnd()
        oprot.get_transport().flush()

def _recv_getPaidCallBalanceList(self, ctx):
    # Read headers, then either an exception frame or the result struct.
    self._iprot.read_response_headers(ctx)
    _, mtype, _ = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        if x.type == TApplicationExceptionType.RESPONSE_TOO_LARGE:
            raise TTransportException(type=TTransportExceptionType.RESPONSE_TOO_LARGE, message=x.message)
        raise x
    result = getPaidCallBalanceList_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    if result.e is not None:
        raise result.e
    if result.success is not None:
        return result.success
    x = TApplicationException(TApplicationExceptionType.MISSING_RESULT, "getPaidCallBalanceList failed: unknown result")
    raise x
    def getPersonalInfos(self, ctx, requiredPersonalInfos):
        """
        Args:
            ctx: FContext
            requiredPersonalInfos: set of PersonalInfo
        """
        # Invoke via the method table rather than calling the private
        # implementation directly.
        return self._methods['getPersonalInfos']([ctx, requiredPersonalInfos])
    def _getPersonalInfos(self, ctx, requiredPersonalInfos):
        # Synchronous round trip: serialize the request, then block on the reply.
        self._send_getPersonalInfos(ctx, requiredPersonalInfos)
        return self._recv_getPersonalInfos(ctx)
    def _send_getPersonalInfos(self, ctx, requiredPersonalInfos):
        # Write one CALL frame under the write lock (no frame interleaving).
        oprot = self._oprot
        with self._write_lock:
            oprot.get_transport().set_timeout(ctx.timeout)
            oprot.write_request_headers(ctx)
            oprot.writeMessageBegin('getPersonalInfos', TMessageType.CALL, 0)
            args = getPersonalInfos_args()
            args.requiredPersonalInfos = requiredPersonalInfos
            args.write(oprot)
            oprot.writeMessageEnd()
            oprot.get_transport().flush()
    def _recv_getPersonalInfos(self, ctx):
        # Read one response frame and unwrap it into a value or an exception.
        self._iprot.read_response_headers(ctx)
        _, mtype, _ = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            # Oversized replies surface as transport errors, not app errors.
            if x.type == TApplicationExceptionType.RESPONSE_TOO_LARGE:
                raise TTransportException(type=TTransportExceptionType.RESPONSE_TOO_LARGE, message=x.message)
            raise x
        result = getPersonalInfos_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.e is not None:
            # Service-declared exception returned by the server.
            raise result.e
        if result.success is not None:
            return result.success
        # Non-void method returned no result: protocol violation.
        x = TApplicationException(TApplicationExceptionType.MISSING_RESULT, "getPersonalInfos failed: unknown result")
        raise x
    def getPrimaryClientsForChannel(self, ctx, userMids):
        """
        Args:
            ctx: FContext
            userMids: list of string
        """
        # Invoke via the method table rather than calling the private
        # implementation directly.
        return self._methods['getPrimaryClientsForChannel']([ctx, userMids])
    def _getPrimaryClientsForChannel(self, ctx, userMids):
        # Synchronous round trip: serialize the request, then block on the reply.
        self._send_getPrimaryClientsForChannel(ctx, userMids)
        return self._recv_getPrimaryClientsForChannel(ctx)
    def _send_getPrimaryClientsForChannel(self, ctx, userMids):
        # Write one CALL frame under the write lock (no frame interleaving).
        oprot = self._oprot
        with self._write_lock:
            oprot.get_transport().set_timeout(ctx.timeout)
            oprot.write_request_headers(ctx)
            oprot.writeMessageBegin('getPrimaryClientsForChannel', TMessageType.CALL, 0)
            args = getPrimaryClientsForChannel_args()
            args.userMids = userMids
            args.write(oprot)
            oprot.writeMessageEnd()
            oprot.get_transport().flush()
    def _recv_getPrimaryClientsForChannel(self, ctx):
        # Read one response frame and unwrap it into a value or an exception.
        self._iprot.read_response_headers(ctx)
        _, mtype, _ = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            # Oversized replies surface as transport errors, not app errors.
            if x.type == TApplicationExceptionType.RESPONSE_TOO_LARGE:
                raise TTransportException(type=TTransportExceptionType.RESPONSE_TOO_LARGE, message=x.message)
            raise x
        result = getPrimaryClientsForChannel_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.e is not None:
            # Service-declared exception returned by the server.
            raise result.e
        if result.success is not None:
            return result.success
        # Non-void method returned no result: protocol violation.
        x = TApplicationException(TApplicationExceptionType.MISSING_RESULT, "getPrimaryClientsForChannel failed: unknown result")
        raise x
    def addBuddyToContact(self, ctx, buddyMid):
        """
        Args:
            ctx: FContext
            buddyMid: string
        """
        # Invoke via the method table rather than calling the private
        # implementation directly.
        return self._methods['addBuddyToContact']([ctx, buddyMid])
    def _addBuddyToContact(self, ctx, buddyMid):
        # Synchronous round trip: serialize the request, then block on the reply.
        self._send_addBuddyToContact(ctx, buddyMid)
        return self._recv_addBuddyToContact(ctx)
    def _send_addBuddyToContact(self, ctx, buddyMid):
        # Write one CALL frame under the write lock (no frame interleaving).
        oprot = self._oprot
        with self._write_lock:
            oprot.get_transport().set_timeout(ctx.timeout)
            oprot.write_request_headers(ctx)
            oprot.writeMessageBegin('addBuddyToContact', TMessageType.CALL, 0)
            args = addBuddyToContact_args()
            args.buddyMid = buddyMid
            args.write(oprot)
            oprot.writeMessageEnd()
            oprot.get_transport().flush()
    def _recv_addBuddyToContact(self, ctx):
        # Read one response frame and unwrap it into a value or an exception.
        self._iprot.read_response_headers(ctx)
        _, mtype, _ = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            # Oversized replies surface as transport errors, not app errors.
            if x.type == TApplicationExceptionType.RESPONSE_TOO_LARGE:
                raise TTransportException(type=TTransportExceptionType.RESPONSE_TOO_LARGE, message=x.message)
            raise x
        result = addBuddyToContact_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.e is not None:
            # Service-declared exception returned by the server.
            raise result.e
        if result.success is not None:
            return result.success
        # Non-void method returned no result: protocol violation.
        x = TApplicationException(TApplicationExceptionType.MISSING_RESULT, "addBuddyToContact failed: unknown result")
        raise x
    def getGroupMemberMidsForAppPlatform(self, ctx, groupId):
        """
        Args:
            ctx: FContext
            groupId: string
        """
        # Invoke via the method table rather than calling the private
        # implementation directly.
        return self._methods['getGroupMemberMidsForAppPlatform']([ctx, groupId])
    def _getGroupMemberMidsForAppPlatform(self, ctx, groupId):
        # Synchronous round trip: serialize the request, then block on the reply.
        self._send_getGroupMemberMidsForAppPlatform(ctx, groupId)
        return self._recv_getGroupMemberMidsForAppPlatform(ctx)
    def _send_getGroupMemberMidsForAppPlatform(self, ctx, groupId):
        # Write one CALL frame under the write lock (no frame interleaving).
        oprot = self._oprot
        with self._write_lock:
            oprot.get_transport().set_timeout(ctx.timeout)
            oprot.write_request_headers(ctx)
            oprot.writeMessageBegin('getGroupMemberMidsForAppPlatform', TMessageType.CALL, 0)
            args = getGroupMemberMidsForAppPlatform_args()
            args.groupId = groupId
            args.write(oprot)
            oprot.writeMessageEnd()
            oprot.get_transport().flush()
    def _recv_getGroupMemberMidsForAppPlatform(self, ctx):
        # Read one response frame and unwrap it into a value or an exception.
        self._iprot.read_response_headers(ctx)
        _, mtype, _ = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            # Oversized replies surface as transport errors, not app errors.
            if x.type == TApplicationExceptionType.RESPONSE_TOO_LARGE:
                raise TTransportException(type=TTransportExceptionType.RESPONSE_TOO_LARGE, message=x.message)
            raise x
        result = getGroupMemberMidsForAppPlatform_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.e is not None:
            # Service-declared exception returned by the server.
            raise result.e
        if result.success is not None:
            return result.success
        # Non-void method returned no result: protocol violation.
        x = TApplicationException(TApplicationExceptionType.MISSING_RESULT, "getGroupMemberMidsForAppPlatform failed: unknown result")
        raise x
    def getUserLanguage(self, ctx):
        """
        Args:
            ctx: FContext
        """
        # Invoke via the method table rather than calling the private
        # implementation directly.
        return self._methods['getUserLanguage']([ctx])
    def _getUserLanguage(self, ctx):
        # Synchronous round trip: serialize the request, then block on the reply.
        self._send_getUserLanguage(ctx)
        return self._recv_getUserLanguage(ctx)
    def _send_getUserLanguage(self, ctx):
        # Write one CALL frame under the write lock (no frame interleaving).
        # This RPC takes no arguments beyond the context.
        oprot = self._oprot
        with self._write_lock:
            oprot.get_transport().set_timeout(ctx.timeout)
            oprot.write_request_headers(ctx)
            oprot.writeMessageBegin('getUserLanguage', TMessageType.CALL, 0)
            args = getUserLanguage_args()
            args.write(oprot)
            oprot.writeMessageEnd()
            oprot.get_transport().flush()
    def _recv_getUserLanguage(self, ctx):
        # Read one response frame and unwrap it into a value or an exception.
        self._iprot.read_response_headers(ctx)
        _, mtype, _ = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            # Oversized replies surface as transport errors, not app errors.
            if x.type == TApplicationExceptionType.RESPONSE_TOO_LARGE:
                raise TTransportException(type=TTransportExceptionType.RESPONSE_TOO_LARGE, message=x.message)
            raise x
        result = getUserLanguage_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.e is not None:
            # Service-declared exception returned by the server.
            raise result.e
        if result.success is not None:
            return result.success
        # Non-void method returned no result: protocol violation.
        x = TApplicationException(TApplicationExceptionType.MISSING_RESULT, "getUserLanguage failed: unknown result")
        raise x
    def lookupPaidCall(self, ctx, dialedNumber, language, referer):
        """
        Args:
            ctx: FContext
            dialedNumber: string
            language: string
            referer: string
        """
        # Invoke via the method table rather than calling the private
        # implementation directly.
        return self._methods['lookupPaidCall']([ctx, dialedNumber, language, referer])
    def _lookupPaidCall(self, ctx, dialedNumber, language, referer):
        # Synchronous round trip: serialize the request, then block on the reply.
        self._send_lookupPaidCall(ctx, dialedNumber, language, referer)
        return self._recv_lookupPaidCall(ctx)
    def _send_lookupPaidCall(self, ctx, dialedNumber, language, referer):
        # Write one CALL frame under the write lock (no frame interleaving).
        oprot = self._oprot
        with self._write_lock:
            oprot.get_transport().set_timeout(ctx.timeout)
            oprot.write_request_headers(ctx)
            oprot.writeMessageBegin('lookupPaidCall', TMessageType.CALL, 0)
            args = lookupPaidCall_args()
            args.dialedNumber = dialedNumber
            args.language = language
            args.referer = referer
            args.write(oprot)
            oprot.writeMessageEnd()
            oprot.get_transport().flush()
    def _recv_lookupPaidCall(self, ctx):
        # Read one response frame and unwrap it into a value or an exception.
        self._iprot.read_response_headers(ctx)
        _, mtype, _ = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            # Oversized replies surface as transport errors, not app errors.
            if x.type == TApplicationExceptionType.RESPONSE_TOO_LARGE:
                raise TTransportException(type=TTransportExceptionType.RESPONSE_TOO_LARGE, message=x.message)
            raise x
        result = lookupPaidCall_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.e is not None:
            # Service-declared exception returned by the server.
            raise result.e
        if result.success is not None:
            return result.success
        # Non-void method returned no result: protocol violation.
        x = TApplicationException(TApplicationExceptionType.MISSING_RESULT, "lookupPaidCall failed: unknown result")
        raise x
    def getExtendedProfile(self, ctx):
        """
        Args:
            ctx: FContext
        """
        # Invoke via the method table rather than calling the private
        # implementation directly.
        return self._methods['getExtendedProfile']([ctx])
def | |
from rest_framework import serializers
from .models import *
class CoordinatorSerializer(serializers.ModelSerializer):
    """Validates and persists Coordinator records; only CourseID is mandatory."""
    # ModelID is not declared explicitly; ModelSerializer derives it from Meta.fields.
    CourseID = serializers.CharField(max_length=100, required=True)
    FName = serializers.CharField(max_length=100, required=False)
    LName = serializers.CharField(max_length=100, required=False)
    Phone = serializers.CharField(max_length=100, required=False)
    Office = serializers.CharField(max_length=100, required=False)
    Email = serializers.CharField(max_length=100, required=False)

    class Meta:
        model = Coordinator
        fields = ('ModelID', 'CourseID', 'FName', 'LName', 'Phone', 'Office', 'Email')

    def create(self, validated_data):
        # Build the row from every declared field; absent keys become None.
        values = {name: validated_data.get(name) for name in self.Meta.fields}
        return self.Meta.model.objects.create(**values)

    def update(self, instance, validated_data):
        # Overwrite each field only when the payload supplies it.
        for name in self.Meta.fields:
            setattr(instance, name, validated_data.get(name, getattr(instance, name)))
        instance.save()
        return instance
class InfoSerializer(serializers.ModelSerializer):
    """Validates and persists course Info records (grading/exam/description text)."""
    # ModelID is not declared explicitly; ModelSerializer derives it from Meta.fields.
    CourseID = serializers.CharField(max_length=100, required=True)
    GradeNotes = serializers.CharField(max_length=5000, required=False)
    Examination = serializers.CharField(max_length=5000, required=False)
    CourseDescription = serializers.CharField(max_length=5000, required=False)
    UseCalc = serializers.CharField(max_length=100, required=False)

    class Meta:
        model = Info
        fields = ('ModelID', 'CourseID', 'GradeNotes', 'Examination',
                  'CourseDescription', 'UseCalc')

    def create(self, validated_data):
        # Build the row from every declared field; absent keys become None.
        values = {name: validated_data.get(name) for name in self.Meta.fields}
        return self.Meta.model.objects.create(**values)

    def update(self, instance, validated_data):
        # Overwrite each field only when the payload supplies it.
        for name in self.Meta.fields:
            setattr(instance, name, validated_data.get(name, getattr(instance, name)))
        instance.save()
        return instance
class GradeDeterminationSerializer(serializers.ModelSerializer):
    """Validates and persists GradeDetermination rows (component -> weight)."""
    # ModelID is not declared explicitly; ModelSerializer derives it from Meta.fields.
    CourseID = serializers.CharField(max_length=100, required=True)
    Component = serializers.CharField(max_length=100, required=False)
    OutcomeEvaluated = serializers.CharField(max_length=100, required=False)
    Weight = serializers.IntegerField(required=False)

    class Meta:
        model = GradeDetermination
        fields = ('ModelID', 'CourseID', 'Component', 'OutcomeEvaluated', 'Weight')

    def create(self, validated_data):
        # Build the row from every declared field; absent keys become None.
        values = {name: validated_data.get(name) for name in self.Meta.fields}
        return self.Meta.model.objects.create(**values)

    def update(self, instance, validated_data):
        # Overwrite each field only when the payload supplies it.
        for name in self.Meta.fields:
            setattr(instance, name, validated_data.get(name, getattr(instance, name)))
        instance.save()
        return instance
class OutcomeSerializer(serializers.ModelSerializer):
    """Validates and persists learning Outcome rows for a course."""
    # ModelID is not declared explicitly; ModelSerializer derives it from Meta.fields.
    CourseID = serializers.CharField(max_length=100, required=True)
    OutcomeNum = serializers.IntegerField(required=False)
    Description = serializers.CharField(max_length=500, required=False)
    GraduateAttribute = serializers.CharField(max_length=100, required=False)
    InstructionLvl = serializers.CharField(max_length=100, required=False)

    class Meta:
        model = Outcome
        fields = ('ModelID', 'CourseID', 'OutcomeNum', 'Description',
                  'GraduateAttribute', 'InstructionLvl')

    def create(self, validated_data):
        # Build the row from every declared field; absent keys become None.
        values = {name: validated_data.get(name) for name in self.Meta.fields}
        return self.Meta.model.objects.create(**values)

    def update(self, instance, validated_data):
        # Overwrite each field only when the payload supplies it.
        for name in self.Meta.fields:
            setattr(instance, name, validated_data.get(name, getattr(instance, name)))
        instance.save()
        return instance
class TimetableSerializer(serializers.ModelSerializer):
    """Validates and persists Timetable rows (section scheduling info)."""
    # ModelID is not declared explicitly; ModelSerializer derives it from Meta.fields.
    CourseID = serializers.CharField(max_length=100, required=True)
    SectionNum = serializers.CharField(max_length=100, required=False)
    Days = serializers.CharField(max_length=100, required=False)
    Time = serializers.CharField(max_length=100, required=False)
    Location = serializers.CharField(max_length=100, required=False)

    class Meta:
        model = Timetable
        fields = ('ModelID', 'CourseID', 'SectionNum', 'Days', 'Time', 'Location')

    def create(self, validated_data):
        # Build the row from every declared field; absent keys become None.
        values = {name: validated_data.get(name) for name in self.Meta.fields}
        return self.Meta.model.objects.create(**values)

    def update(self, instance, validated_data):
        # Overwrite each field only when the payload supplies it.
        for name in self.Meta.fields:
            setattr(instance, name, validated_data.get(name, getattr(instance, name)))
        instance.save()
        return instance
class GradeDistributionSerializer(serializers.ModelSerializer):
    """Validates and persists GradeDistribution rows (score range -> letter)."""
    # ModelID is not declared explicitly; ModelSerializer derives it from Meta.fields.
    CourseID = serializers.CharField(max_length=100, required=True)
    LowerLimit = serializers.IntegerField(required=False)
    UpperLimit = serializers.IntegerField(required=False)
    LetterGrade = serializers.CharField(max_length=100, required=False)

    class Meta:
        model = GradeDistribution
        fields = ('ModelID', 'CourseID', 'LowerLimit', 'UpperLimit', 'LetterGrade')

    def create(self, validated_data):
        # Build the row from every declared field; absent keys become None.
        values = {name: validated_data.get(name) for name in self.Meta.fields}
        return self.Meta.model.objects.create(**values)

    def update(self, instance, validated_data):
        # Overwrite each field only when the payload supplies it.
        for name in self.Meta.fields:
            setattr(instance, name, validated_data.get(name, getattr(instance, name)))
        instance.save()
        return instance
class LectureSerializer(serializers.ModelSerializer):
    """Validates and persists Lecture rows (lecture section + instructor contact)."""
    # ModelID is not declared explicitly; ModelSerializer derives it from Meta.fields.
    CourseID = serializers.CharField(max_length=100, required=True)
    LectureNum = serializers.CharField(max_length=100, required=False)
    FName = serializers.CharField(max_length=100, required=False)
    LName = serializers.CharField(max_length=100, required=False)
    Phone = serializers.CharField(max_length=100, required=False)
    Office = serializers.CharField(max_length=100, required=False)
    Email = serializers.CharField(max_length=100, required=False)

    class Meta:
        model = Lecture
        fields = ('ModelID', 'CourseID', 'LectureNum', 'FName', 'LName',
                  'Phone', 'Office', 'Email')

    def create(self, validated_data):
        # Build the row from every declared field; absent keys become None.
        values = {name: validated_data.get(name) for name in self.Meta.fields}
        return self.Meta.model.objects.create(**values)

    def update(self, instance, validated_data):
        # Overwrite each field only when the payload supplies it.
        for name in self.Meta.fields:
            setattr(instance, name, validated_data.get(name, getattr(instance, name)))
        instance.save()
        return instance
class TutorialSerializer(serializers.ModelSerializer):
    """Validates and persists Tutorial rows (tutorial section + TA contact)."""
    # ModelID is not declared explicitly; ModelSerializer derives it from Meta.fields.
    CourseID = serializers.CharField(max_length=100, required=True)
    TutorialNum = serializers.CharField(max_length=100, required=False)
    FName = serializers.CharField(max_length=100, required=False)
    LName = serializers.CharField(max_length=100, required=False)
    Phone = serializers.CharField(max_length=100, required=False)
    Office = serializers.CharField(max_length=100, required=False)
    Email = serializers.CharField(max_length=100, required=False)

    class Meta:
        model = Tutorial
        fields = ('ModelID', 'CourseID', 'TutorialNum', 'FName', 'LName',
                  'Phone', 'Office', 'Email')

    def create(self, validated_data):
        # Build the row from every declared field; absent keys become None.
        values = {name: validated_data.get(name) for name in self.Meta.fields}
        return self.Meta.model.objects.create(**values)

    def update(self, instance, validated_data):
        # Overwrite each field only when the payload supplies it.
        for name in self.Meta.fields:
            setattr(instance, name, validated_data.get(name, getattr(instance, name)))
        instance.save()
        return instance
class CourseSerializer(serializers.ModelSerializer):
    """Validates and persists top-level Course records."""
    # ModelID is not declared explicitly; ModelSerializer derives it from Meta.fields.
    CourseID = serializers.CharField(max_length=100, required=True)
    CourseHours = serializers.CharField(max_length=100, required=False)
    CourseName = serializers.CharField(max_length=100, required=False)
    CalenderRefrence = serializers.CharField(max_length=100, required=False)
    AcademicCredit = serializers.IntegerField(required=False)
    DateCreated = serializers.CharField(max_length=100, required=False)

    class Meta:
        model = Course
        fields = ('ModelID', 'CourseID', 'CourseHours', 'CourseName',
                  'CalenderRefrence', 'AcademicCredit', 'DateCreated')

    def create(self, validated_data):
        # Build the row from every declared field; absent keys become None.
        values = {name: validated_data.get(name) for name in self.Meta.fields}
        return self.Meta.model.objects.create(**values)

    def update(self, instance, validated_data):
        # Overwrite each field only when the payload supplies it.
        for name in self.Meta.fields:
            setattr(instance, name, validated_data.get(name, getattr(instance, name)))
        instance.save()
        return instance
class TextbookSerializer(serializers.ModelSerializer):
    """Validates and persists Textbook rows for a course."""
    # ModelID is not declared explicitly; ModelSerializer derives it from Meta.fields.
    CourseID = serializers.CharField(max_length=100, required=True)
    TITLE = serializers.CharField(max_length=100, required=False)
    Publisher = serializers.CharField(max_length=100, required=False)
    Author = serializers.CharField(max_length=100, required=False)
    Edition = serializers.CharField(max_length=100, required=False)
    type = serializers.CharField(max_length=100, required=False)

    class Meta:
        model = Textbook
        fields = ('ModelID', 'CourseID', 'TITLE', 'Publisher', 'Author',
                  'Edition', 'type')

    def create(self, validated_data):
        # Build the row from every declared field; absent keys become None.
        values = {name: validated_data.get(name) for name in self.Meta.fields}
        return self.Meta.model.objects.create(**values)

    def update(self, instance, validated_data):
        # Overwrite each field only when the payload supplies it.
        for name in self.Meta.fields:
            setattr(instance, name, validated_data.get(name, getattr(instance, name)))
        instance.save()
        return instance
class AuWeightSerializer(serializers.ModelSerializer):
    """Validates and persists AuWeight rows (accreditation-unit weights)."""
    # ModelID is not declared explicitly; ModelSerializer derives it from Meta.fields.
    CourseID = serializers.CharField(max_length=100, required=True)
    Category = serializers.CharField(max_length=100, required=True)
    AU = serializers.IntegerField(required=False)

    class Meta:
        model = AuWeight
        fields = ('ModelID', 'CourseID', 'Category', 'AU')

    def create(self, validated_data):
        # Build the row from every declared field; absent keys become None.
        values = {name: validated_data.get(name) for name in self.Meta.fields}
        return self.Meta.model.objects.create(**values)

    def update(self, instance, validated_data):
        # Overwrite each field only when the payload supplies it.
        for name in self.Meta.fields:
            setattr(instance, name, validated_data.get(name, getattr(instance, name)))
        instance.save()
        return instance
class ContentCategorySerializer(serializers.ModelSerializer):
    """Validates and persists ContentCategory rows (category -> element)."""
    # ModelID is not declared explicitly; ModelSerializer derives it from Meta.fields.
    CourseID = serializers.CharField(max_length=100, required=True)
    CategoryType = serializers.CharField(max_length=100, required=True)
    Element = serializers.CharField(max_length=100, required=True)

    class Meta:
        model = ContentCategory
        fields = ('ModelID', 'CourseID', 'CategoryType', 'Element')

    def create(self, validated_data):
        # Build the row from every declared field; absent keys become None.
        values = {name: validated_data.get(name) for name in self.Meta.fields}
        return self.Meta.model.objects.create(**values)

    def update(self, instance, validated_data):
        # Overwrite each field only when the payload supplies it.
        for name in self.Meta.fields:
            setattr(instance, name, validated_data.get(name, getattr(instance, name)))
        instance.save()
        return instance
class LabSerializer(serializers.ModelSerializer):
    """Validates and persists Lab rows (lab section, safety info, instructor contact)."""
    # ModelID is not declared explicitly; ModelSerializer derives it from Meta.fields.
    CourseID = serializers.CharField(max_length=100, required=True)
    LabNum = serializers.CharField(max_length=100, required=True)
    NumberOfLabs = serializers.IntegerField(required=False)
    LabType = serializers.CharField(max_length=100, required=True)
    SafetyExamined = serializers.CharField(max_length=100, required=True)
    SafetyTaught = serializers.CharField(max_length=100, required=True)
    FName = serializers.CharField(max_length=100, required=True)
    LName = serializers.CharField(max_length=100, required=True)
    Phone = serializers.CharField(max_length=100, required=True)
    Office = serializers.CharField(max_length=100, required=True)
    Email = serializers.CharField(max_length=100, required=True)

    class Meta:
        model = Lab
        fields = ('ModelID', 'CourseID', 'LabNum', 'NumberOfLabs', 'LabType',
                  'SafetyExamined', 'SafetyTaught', 'FName', 'LName', 'Phone',
                  'Office', 'Email')

    def create(self, validated_data):
        # Build the row from every declared field; absent keys become None.
        values = {name: validated_data.get(name) for name in self.Meta.fields}
        return self.Meta.model.objects.create(**values)

    def update(self, instance, validated_data):
        # Overwrite each field only when the payload supplies it.
        for name in self.Meta.fields:
            setattr(instance, name, validated_data.get(name, getattr(instance, name)))
        instance.save()
        return instance
class SectionSerializer(serializers.ModelSerializer):
# | |
# <gh_stars>1-10
# coding:utf-8
# noinspection PyUnresolvedReferences
from maya import cmds, OpenMaya, OpenMayaUI
from LxBasic import bscMtdCore, bscMethods
#
from LxMaya.method.basic import _maMethodBasic
#
from LxMaya.method.config import _maConfig
#
class Mtd_MaAnimation(object):
    """Thin wrappers around Maya's time-slider / playback-range commands."""
    app_method = _maMethodBasic.Mtd_AppMaya
    @staticmethod
    def setCurrentFrame(frame):
        # Move the time slider to *frame*.
        cmds.currentTime(frame)
    @staticmethod
    def getCurrentFrame():
        # Current frame on the time slider.
        return cmds.currentTime(query=1)
    @staticmethod
    def setAnimationFrameRange(startFrame, endFrame):
        # Playback range is exactly [startFrame, endFrame]; the overall
        # animation range gets a 5-frame pad on either side.
        cmds.playbackOptions(minTime=startFrame)
        cmds.playbackOptions(animationStartTime=int(startFrame) - 5)
        cmds.playbackOptions(maxTime=endFrame)
        cmds.playbackOptions(animationEndTime=int(endFrame) + 5)
    @classmethod
    def toFrameRange(cls, frame):
        """Normalize *frame* to a (startFrame, endFrame) pair.

        Accepts a (start, end) tuple/list, a single int/float, or anything
        else (falls back to the current frame for both ends).
        """
        if isinstance(frame, (tuple, list)):
            startFrame, endFrame = frame
        elif isinstance(frame, (int, float)):
            # BUG FIX: the original tested `isinstance(float, float)`, which is
            # always False, so float inputs silently fell through to the
            # current-frame fallback instead of being used directly.
            startFrame = endFrame = frame
        else:
            startFrame = endFrame = cls.getCurrentFrame()
        return startFrame, endFrame
#
class MaAssemblyMethod(_maMethodBasic.Mtd_AppMaya):
    """Helpers around Maya scene-assembly nodes (assemblyDefinition / assemblyReference)."""
    _nodeMethod = _maMethodBasic.MaNodeMethodBasic
    @classmethod
    def getAssemblyReferenceLis(cls):
        # NOTE(review): unimplemented stub — always returns None.
        pass
    @staticmethod
    def getAssemblyDefinitionFile(assemblyReferenceString):
        # Definition-file path stored on the reference node's '.definition' attr.
        attr = assemblyReferenceString + '.definition'
        return cmds.getAttr(attr)
    @staticmethod
    def setAssemblyActive(assemblyReferenceString, name):
        # Activate the representation called *name*.
        cmds.assembly(assemblyReferenceString + '.representations', edit=1, active=name)
    @staticmethod
    def getAssemblyActive(assemblyReferenceString):
        # Active representation name; the literal string 'None' when unset.
        return cmds.assembly(assemblyReferenceString, query=1, active=1) or 'None'
    @staticmethod
    def getAssemblyNamespace(assemblyReferenceString):
        # Representation namespace of the assembly node.
        return cmds.assembly(assemblyReferenceString, query=1, repNamespace=1)
    @classmethod
    def setAssemblySceneDefinitionCreate(cls, assemblyReferenceString, fileString_):
        # Create a definition node with a single 'Scene' representation and
        # activate it (the representation is named after the file's basename).
        # NOTE(review): relies on `cls._maConfig` being provided by a base
        # class — confirm; the module-level `_maConfig` import is not used here.
        cmds.assembly(name=assemblyReferenceString, type=cls._maConfig.DEF_mya_type_assembly_definition)
        cmds.assembly(assemblyReferenceString, edit=1, createRepresentation='Scene', input=fileString_)
        cmds.assembly(assemblyReferenceString, edit=1, active=bscMethods.OsFile.basename(fileString_))
    @classmethod
    def setAssemblyReferenceCreate(cls, assemblyReferenceString, osAssemblyDefinitionFile):
        # Create a reference node pointing at an existing definition file.
        cmds.assembly(name=assemblyReferenceString, type=cls._maConfig.DEF_mya_type_assembly_reference)
        cmds.setAttr(assemblyReferenceString + '.definition', osAssemblyDefinitionFile, type='string')
#
class MaWindowMethod(_maMethodBasic.Mtd_AppMaya):
    """Window-management helpers (create/delete windows, panels, colors)."""
    MaDefShader = 'lambert1'
    MaDefWindowMaximum = 2048
    MaDefBackgroundRgb = .25, .25, .25
    MaDefShaderRgb = 0, .75, .75
    @staticmethod
    def isWindowExists(nameText):
        # True when a window with this name is currently registered.
        return cmds.window(nameText, exists=1)
    @staticmethod
    def setVisiblePanelsDelete():
        # Close the window of every visible panel except 'modelPanel4'.
        for panelName in cmds.getPanel(visiblePanels=1):
            if panelName == 'modelPanel4':
                continue
            if not cmds.panel(panelName, exists=1):
                continue
            windowName = panelName + 'Window'
            if cmds.window(windowName, exists=1):
                cmds.deleteUI(windowName)
    @classmethod
    def setCreateWindow(cls, nameText, width, height, percent=.5):
        # Recreate the window from scratch, its pane scaled by *percent* of
        # the (clamped) requested size.
        cls.setWindowDelete(nameText)
        scaledW, scaledH = bscMethods.Size2d.remapTo(width, height, maximum=cls.MaDefWindowMaximum)
        cmds.window(nameText, title=bscMethods.StrCamelcase.toPrettify(nameText))
        cmds.showWindow(nameText)
        return cmds.paneLayout(width=scaledW * percent, height=scaledH * percent)
    @classmethod
    def setDefaultShaderColor(cls, r, g, b):
        # Tint the default shader ('lambert1').
        cmds.setAttr(cls.MaDefShader + '.color', r, g, b)
    @staticmethod
    def setBackgroundColor(r, g, b):
        # Apply the same color to the flat background and both gradient stops.
        for colorSlot in ('background', 'backgroundTop', 'backgroundBottom'):
            cmds.displayRGBColor(colorSlot, r, g, b)
    @classmethod
    def setWindowDelete(cls, nameText):
        # Remove the window if present; safe to call unconditionally.
        if cls.isWindowExists(nameText):
            cmds.deleteUI(nameText)
#
class Mtd_MaViewport(_maMethodBasic.Mtd_AppMaya):
    """Helpers for building model panels and driving Maya viewport display
    modes via ``cmds.modelEditor`` / ``cmds.isolateSelect``."""
    # Default view options: smooth-shaded, default lights, no HUD/grid/manipulators.
    MaDefViewportViewOptionKwargs = dict(
        displayAppearance='smoothShaded',
        displayLights='default',
        useDefaultMaterial=False,
        wireframeOnShaded=False,
        fogging=False,
        twoSidedLighting=True,
        manipulators=False,
        grid=False,
        headsUpDisplay=False,
        selectionHiliteDisplay=False,
    )
    # Default per-object-type visibility; the gpuCache display filter is enabled
    # through pluginObjects.
    MaDefViewportObjectDisplayOptionKwargs = dict(
        polymeshes=True,
        subdivSurfaces=True,
        fluids=True,
        strokes=True,
        nCloths=True,
        nParticles=True,
        pluginShapes=True,
        pluginObjects=['gpuCacheDisplayFilter', 1]
    )
    @classmethod
    def setCreateViewPanel(cls, viewport, layout, camera, menuBarVisible=False):
        """Create a model panel under *layout* looking through *camera* and
        return its name.

        NOTE(review): *viewport* is only used for the prettified label, not as
        the panel name — confirm that is intended.
        """
        return cmds.modelPanel(
            label=bscMethods.StrCamelcase.toPrettify(viewport),
            parent=layout,
            camera=camera,
            menuBarVisible=menuBarVisible,
        )
    @classmethod
    def setViewportView(cls, viewport, optionKwargs=None):
        """Apply view options to *viewport* (default: MaDefViewportViewOptionKwargs)."""
        if optionKwargs is None:
            optionKwargs = cls.MaDefViewportViewOptionKwargs.copy()
        cmds.modelEditor(
            viewport,
            edit=1,
            activeView=1,
            #
            **optionKwargs
        )
    @classmethod
    def setViewportObjectDisplay(cls, viewport, optionKwargs=None):
        """Hide every object type, then re-enable only the types named in
        *optionKwargs* (default: MaDefViewportObjectDisplayOptionKwargs)."""
        # first pass: turn everything off so the second pass is an allow-list
        cmds.modelEditor(
            viewport,
            edit=1,
            activeView=1,
            #
            allObjects=0,
        )
        if optionKwargs is None:
            optionKwargs = cls.MaDefViewportObjectDisplayOptionKwargs.copy()
        #
        cmds.modelEditor(
            viewport,
            edit=1,
            activeView=1,
            #
            **optionKwargs
        )
    @staticmethod
    def setViewportVp2Renderer(viewport, lineAAEnable=True, multiSampleEnable=True, ssaoEnable=True):
        """Switch *viewport* to Viewport 2.0 and set the global AA /
        multisample / SSAO flags (the setAttr calls affect all VP2 views)."""
        rendererName = 'vp2Renderer'
        panelType = cmds.getPanel(typeOf=viewport)
        if panelType == 'modelPanel':
            cmds.modelEditor(
                viewport,
                edit=1,
                rendererName=rendererName,
                rendererOverrideName='myOverride'
            )
        cmds.setAttr('hardwareRenderingGlobals.lineAAEnable', lineAAEnable)
        cmds.setAttr('hardwareRenderingGlobals.multiSampleEnable', multiSampleEnable)
        cmds.setAttr('hardwareRenderingGlobals.ssaoEnable', ssaoEnable)
    @staticmethod
    def setViewportDefaultDisplayMode(viewport):
        """Shaded display using the default (gray) material; no textures/shadows."""
        cmds.modelEditor(
            viewport,
            edit=1,
            activeView=1,
            useDefaultMaterial=1,
            displayAppearance='smoothShaded',
            displayTextures=0,
            displayLights='default',
            shadows=0
        )
    @staticmethod
    def setViewportShaderDisplayMode(viewport):
        """Shaded display using assigned shaders; no textures/shadows."""
        cmds.modelEditor(
            viewport,
            edit=1,
            activeView=1,
            useDefaultMaterial=0,
            displayAppearance='smoothShaded',
            displayTextures=0,
            displayLights='default',
            shadows=0
        )
    @staticmethod
    def setViewportTextureDisplayMode(viewport):
        """Shaded display with textures on, default lighting, no shadows."""
        cmds.modelEditor(
            viewport,
            edit=1,
            activeView=1,
            useDefaultMaterial=0,
            displayAppearance='smoothShaded',
            displayTextures=1,
            displayLights='default',
            shadows=0
        )
    @staticmethod
    def setViewportLightDisplayMode(viewport):
        """Shaded display with textures, all scene lights and shadows."""
        cmds.modelEditor(
            viewport,
            edit=1,
            activeView=1,
            useDefaultMaterial=0,
            displayAppearance='smoothShaded',
            displayTextures=1,
            displayLights='all',
            shadows=1
        )
    @staticmethod
    def setViewportSelectObjectIsolate(viewport, boolean=True):
        """Toggle isolate-select for *viewport*."""
        cmds.isolateSelect(viewport, state=boolean)
#
class MaGeometryNodeMethod(_maMethodBasic.M2GeometryNodeMethodBasic):
    """Geometry-node helpers; everything is inherited from the basic class."""
#
class MaCheckMethod(_maMethodBasic.M2GeometryNodeMethodBasic):
    """Model-publish sanity checks: empty groups, shape-less transforms,
    instancing, construction history, name clashes and a battery of mesh/uv
    topology checks built on ``cmds.polySelectConstraint``.

    The ``maAst*CheckConfigDic`` methods expose the checks as ordered maps of
    ``key -> (default-on, label, description, check-method, fix-method)``.
    """
    @classmethod
    def filterGroupEmptyLis(cls, groupString):
        """Return the nodes matched by *groupString* that have no child shapes."""
        lis = []
        stringLis = cls._toAppExistStringList(groupString)
        if stringLis:
            for i in stringLis:
                shapeLis = cls.getChildShapeLisByGroup(i)
                if not shapeLis:
                    lis.append(i)
        return lis
    @classmethod
    def fixGroupEmpty(cls, groupString):
        """Delete every (still existing) node matched by *groupString*."""
        # plain loop instead of a side-effect-only list comprehension
        for i in cls._toAppExistStringList(groupString):
            if cls._isAppExist(i):
                cls.setNodeDelete(i)
    #
    @classmethod
    def filterNonShapeTransformLis(cls, nodepathString):
        """Return the transforms matched by *nodepathString* that carry no shape."""
        lis = []
        #
        stringLis = cls._toAppExistStringList(nodepathString)
        if stringLis:
            for transform in stringLis:
                shapePath = cls._dcc_getNodShapeNodepathStr(transform)
                if shapePath is None:
                    lis.append(transform)
        return lis
    @classmethod
    def fixNonShapeTransform(cls, nodepathString):
        """Delete every (still existing) node matched by *nodepathString*."""
        for i in cls._toAppExistStringList(nodepathString):
            if cls._isAppExist(i):
                cls.setNodeDelete(i)
    @classmethod
    def filterObjectInstanceLis(cls, nodepathString):
        """Return the transforms whose shape node is instanced."""
        lis = []
        stringLis = cls._toAppExistStringList(nodepathString)
        if stringLis:
            for transform in stringLis:
                shapePath = cls._dcc_getNodShapeNodepathStr(transform)
                # shape-less transforms are reported by the non-shape check;
                # guard so they cannot break the instancing query
                if shapePath is not None and cls.isObjectShapeInstanced(shapePath) is True:
                    lis.append(transform)
        return lis
    @classmethod
    def fixObjectInstance(cls, nodepathString):
        """Convert every matched instanced object into a unique copy."""
        stringLis = cls._toAppExistStringList(nodepathString)
        if stringLis:
            for i in stringLis:
                cls.setObjectInstanceCovert(i)
    @classmethod
    def filterObjectHistoryNodeDic(cls, nodepathString):
        """Map each transform to its construction-history nodes, ignoring
        shading-engine, groupId and set nodes."""
        dic = {}
        exceptNodeTypeLis = [
            cls.DEF_mya_type_shading_engine,
            cls.DEF_mya_type_group_id,
            cls.DEF_mya_type_set
        ]
        #
        stringLis = cls._toAppExistStringList(nodepathString)
        if stringLis:
            for transform in stringLis:
                # BUG FIX: the history result was previously assigned back onto
                # ``stringLis``, shadowing the very sequence being iterated.
                historyNodeLis = cmds.listHistory(transform, pruneDagObjects=1) or []
                for node in historyNodeLis:
                    nodeType = cls._getNodeCategoryString(node)
                    if nodeType not in exceptNodeTypeLis:
                        dic.setdefault(transform, []).append(node)
        return dic
    @classmethod
    def fixObjectHistory(cls, nodepathString):
        # Not implemented yet.
        pass
    @classmethod
    def filterObjectNonDefaultMatrixLis(cls, nodepathString):
        """Return the nodes whose transformation matrix is not the default."""
        lis = []
        #
        stringLis = cls._toAppExistStringList(nodepathString)
        if stringLis:
            for i in stringLis:
                if cls.isDefaultMatrix(i) is False:
                    lis.append(i)
        return lis
    @classmethod
    def fixTransformNonDefaultMatrix(cls, nodepathString):
        # Not implemented yet.
        pass
    @classmethod
    def _toErrorDic(cls, errorLis):
        """Group component strings by their mesh's full path and clear the
        selection left behind by the constraint-based checks."""
        dic = {}
        if errorLis:
            for i in errorLis:
                meshPath = cls._dcc_getNodFullpathNodepathStr(i.split(cls.DEF_mya_node_port_pathsep)[0])
                compPath = i
                #
                dic.setdefault(meshPath, []).append(compPath)
        #
        cmds.select(clear=1)
        return dic
    @classmethod
    def getMeshNSideFaceDic(cls, meshObjectLis):
        """Return faces with more than four sides, grouped by mesh."""
        cmds.select(meshObjectLis)
        cmds.polySelectConstraint(mode=3, type=8, size=3)
        cmds.polySelectConstraint(mode=0, type=8, size=0)
        return cls._toErrorDic(cmds.ls(selection=True))
    @classmethod
    def getMeshNonPlanarFaceDic(cls, meshObjectLis):
        """Return non-planar faces, grouped by mesh."""
        cmds.select(meshObjectLis)
        cmds.polySelectConstraint(mode=3, type=8, planarity=1)
        cmds.polySelectConstraint(mode=0, type=8, planarity=0)
        return cls._toErrorDic(cmds.ls(selection=True))
    @classmethod
    def getMeshHoledFaceDic(cls, meshObjectLis):
        """Return holed faces, grouped by mesh."""
        cmds.select(meshObjectLis)
        cmds.polySelectConstraint(mode=3, type=8, holes=1)
        cmds.polySelectConstraint(mode=0, type=8, holes=0)
        return cls._toErrorDic(cmds.ls(selection=True))
    @classmethod
    def getMeshConcaveFaceDic(cls, meshObjectLis):
        """Return concave faces, grouped by mesh."""
        cmds.select(meshObjectLis)
        cmds.polySelectConstraint(mode=3, type=8, convexity=1)
        cmds.polySelectConstraint(mode=0, type=8, convexity=0)
        return cls._toErrorDic(cmds.ls(selection=True))
    @classmethod
    def getMeshSharedUvDic(cls, meshObjectLis):
        """Return shared uvs, grouped by mesh."""
        cmds.select(meshObjectLis)
        cmds.polySelectConstraint(mode=3, type=16, textureshared=1)
        cmds.polySelectConstraint(mode=0, type=16, textureshared=0)
        return cls._toErrorDic(cmds.ls(selection=True))
    @classmethod
    def getMeshZeroAreaFaceDic(cls, meshObjectLis):
        """Return zero-geometric-area faces, grouped by mesh."""
        miniValue = .0
        maxiValue = .0
        cmds.select(meshObjectLis)
        cmds.polySelectConstraint(mode=3, type=8, geometricarea=1, geometricareabound=(miniValue, maxiValue))
        cmds.polySelectConstraint(mode=0, type=8, geometricarea=0)
        return cls._toErrorDic(cmds.ls(selection=True))
    @classmethod
    def getMeshZeroLengthEdgeDic(cls, meshObjectLis):
        """Return zero-length edges, grouped by mesh."""
        miniValue = .0
        maxiValue = .0
        cmds.select(meshObjectLis)
        cmds.polySelectConstraint(mode=3, type=0x8000, length=1, lengthbound=(miniValue, maxiValue))
        cmds.polySelectConstraint(mode=0, type=0x8000, length=0)
        return cls._toErrorDic(cmds.ls(selection=True))
    @classmethod
    def getMeshZeroAreaUvDic(cls, meshObjectLis):
        """Return zero-uv-area faces, grouped by mesh.

        NOTE(review): the geometric-area constraint is applied here as well as
        the textured-area one, so only faces that are ALSO zero geometric area
        are reported — confirm whether that intersection is intended.
        """
        miniValue = .0
        maxiValue = .0
        cmds.select(meshObjectLis)
        cmds.polySelectConstraint(mode=3, type=8, geometricarea=1, geometricareabound=(miniValue, maxiValue))
        cmds.polySelectConstraint(mode=3, type=8, texturedarea=1, texturedareabound=(miniValue, maxiValue))
        cmds.polySelectConstraint(mode=0, type=8, texturedarea=0, geometricarea=0)
        return cls._toErrorDic(cmds.ls(selection=True))
    @classmethod
    def getMeshLaminaFaceDic(cls, meshObjectLis):
        """Return lamina (coincident) faces, grouped by mesh."""
        cmds.select(meshObjectLis)
        cmds.polySelectConstraint(mode=3, type=8, topology=2)
        cmds.polySelectConstraint(mode=0, type=8, topology=0)
        return cls._toErrorDic(cmds.ls(selection=True))
    @classmethod
    def getMeshNonTriangulableFaceDic(cls, meshObjectLis):
        """Return faces that cannot be triangulated, grouped by mesh."""
        cmds.select(meshObjectLis)
        cmds.polySelectConstraint(mode=3, type=8, topology=1)
        cmds.polySelectConstraint(mode=0, type=8, topology=0)
        return cls._toErrorDic(cmds.ls(selection=True))
    @classmethod
    def getMeshNonMappingFaceDic(cls, meshObjectLis):
        """Return faces that carry no uv mapping, grouped by mesh."""
        cmds.select(meshObjectLis)
        cmds.polySelectConstraint(mode=3, type=8, textured=2)
        cmds.polySelectConstraint(mode=0, type=8, textured=0)
        return cls._toErrorDic(cmds.ls(selection=True))
    @classmethod
    def getMeshNonManifoldVertexDic(cls, meshObjectLis):
        """Return non-manifold vertices, grouped by mesh."""
        cmds.select(meshObjectLis)
        return cls._toErrorDic(cmds.polyInfo(nonManifoldVertices=1))
    @classmethod
    def getMeshNonManifoldEdgeDic(cls, meshObjectLis):
        """Return non-manifold edges, grouped by mesh."""
        cmds.select(meshObjectLis)
        return cls._toErrorDic(cmds.polyInfo(nonManifoldEdges=1))
    @classmethod
    def filterObjectNameOverlapDic(cls, nodepathString):
        """Map each transform to other nodes that share its short name."""
        dic = {}
        #
        stringLis = cls._toAppExistStringList(nodepathString)
        if stringLis:
            for transform in stringLis:
                nodeName = cls._nodeString2nodename_(transform)
                data = cmds.ls(nodeName, long=1) or []
                if len(data) > 1:
                    for i in data:
                        if i != transform:
                            dic.setdefault(transform, []).append(i)
        return dic
    @classmethod
    def getMeshNormalLockVertexDic(cls, nodepathString):
        """Map each transform to its vertices with locked normals."""
        dic = {}
        #
        stringLis = cls._toAppExistStringList(nodepathString)
        if stringLis:
            for transform in stringLis:
                vertexIdLis = cls.getMeshNormalLockVertexLis(transform)
                if vertexIdLis:
                    dic[transform] = cls._toMeshVertexComp(transform, vertexIdLis)
        return dic
    @classmethod
    def getMeshOpenEdgeDic(cls, nodepathString):
        """Map each node to its open (border) edges."""
        dic = {}
        #
        stringLis = cls._toAppExistStringList(nodepathString)
        if stringLis:
            for i in stringLis:
                edgeIdLis = cls.getMeshOpenEdgeIdLis(i)
                if edgeIdLis:
                    dic[i] = cls._toMeshEdgeComp(i, edgeIdLis)
        #
        return dic
    @classmethod
    def maAstModelGeometryCheckConfigDic(cls):
        """Ordered config of the geometry checks (key -> default-on, label,
        description, check-method, fix-method)."""
        return bscMtdCore.orderedDict(
            [
                ('meshInstanceCheck', (True, 'Mesh has Instance', u'存在关联复制的"Mesh"', cls.filterObjectInstanceLis, None)),
                ('meshHistoryCheck', (True, 'Mesh has History Nde_Node(s)', u'存在历史记录的"Mesh"', cls.filterObjectHistoryNodeDic, None)),
                #
                ('meshOverlapNameCheck', (True, 'Mesh has Overlap Name', u'存在重名的"Mesh"', cls.filterObjectNameOverlapDic, None)),
                #
                ('meshMatrixNonDefaultCheck', (True, 'Mesh Matrix is Non - Default ', (u'非默认的"Mesh Matrix"', u'1."Transform"的"Transformation"存在数值', u'2."Group"的"Transformation"存在数值'), cls.filterObjectNonDefaultMatrixLis, None)),
                #
                ('meshFaceNSidedCheck', (True, 'Mesh Face(s) is N - Sided', u'超过四边的"Mesh Face"', cls.getMeshNSideFaceDic, None)),
                ('meshFaceHoledCheck', (True, 'Mesh Face(s) is Holed', u'破损的"Mesh Face"', cls.getMeshHoledFaceDic, None)),
                ('meshFaceConcaveCheck', (False, 'Mesh Face(s) is Concave', u'凹形的"Mesh Face"', cls.getMeshConcaveFaceDic, None)),
                ('meshFaceNonPlanarCheck', (False, 'Mesh Face(s) is Non - planar', u'不平整的"Mesh Face"', cls.getMeshNonPlanarFaceDic, None)),
                ('meshFaceNonTriangulableCheck', (True, 'Mesh Face(s) is Non - Triangulable', u'无法三角化的"Mesh Face"', cls.getMeshNonTriangulableFaceDic, None)),
                ('meshFaceNonMappingCheck', (True, 'Mesh Face(s) is Non - Mapping', u'无Uv的"Mesh Face"', cls.getMeshNonMappingFaceDic, None)),
                ('meshFaceLaminaCheck', (True, 'Mesh Face(s) is Lamina', u'重合的"Mesh Face"', cls.getMeshLaminaFaceDic, None)),
                #
                ('meshEdgeNonManifoldCheck', (True, 'Mesh Edge(s) is Non - Manifold', u'非流形的"Mesh Edge"', cls.getMeshNonManifoldEdgeDic, None)),
                ('meshFaceZeroAreaCheck', (True, 'Mesh Face(s) is Zero - Area', u'无面积的"Mesh Face"', cls.getMeshZeroAreaFaceDic, None)),
                ('meshEdgeZeroLengthCheck', (True, 'Mesh Edge(s) is Zero - Length', u'无长度的"Mesh Edge"', cls.getMeshZeroLengthEdgeDic, None)),
                ('meshEdgeOpenCheck', (True, 'Mesh Edge(s) is Open', u'开放的"Mesh Edge"', cls.getMeshOpenEdgeDic, None)),
                #
                ('meshVertexNormalLockCheck', (True, 'Mesh Vertex(s) is Normal - Lock', u'锁定的"Mesh Vertex Normal"', cls.getMeshNormalLockVertexDic, None)),
                ('meshVertexNonManifoldCheck', (True, 'Mesh Vertex(s) is Non - Manifold', u'非流形的"Mesh Vertex"', cls.getMeshNonManifoldVertexDic, None)),
                #
                ('meshUvSharedCheck', (False, 'Mesh Uv(s) is Shared', u'共用"Mesh Uv"', cls.getMeshSharedUvDic, None)),
                ('meshUvZeroAreaCheck', (True, 'Mesh Uv(s) is Zero - Area', u'无面积的"Mesh Uv"', cls.getMeshZeroAreaUvDic, None)),
            ]
        )
    @classmethod
    def maAstModelTransformCheckConfigDic(cls):
        """Ordered config of the transform checks."""
        return bscMtdCore.orderedDict(
            [
                ('transformNonShapeCheck', (True, 'Transform has Non - Shape', u'无"Shape"的"Transform"', cls.filterNonShapeTransformLis, None))
            ]
        )
    @classmethod
    def maAstModelGroupCheckConfigDic(cls):
        """Ordered config of the group checks (this one also has a fixer)."""
        return bscMtdCore.orderedDict(
            [
                ('groupEmptyCheck', (True, 'Group is Empty', u'空的"Group"', cls.filterGroupEmptyLis, cls.fixGroupEmpty))
            ]
        )
#
class MaCameraNodeMethod(_maMethodBasic.MaNodeMethodBasic):
MaDefDisplayGateMaskOpacity = 1
MaDefDisplayGateMaskColor = 0, 0, 0
#
MaDefCameraOptionKwargs = dict(
displayResolution=True,
displayFilmGate=False,
displayGateMask=True,
displaySafeTitle=False,
displaySafeAction=False,
displayFieldChart=False,
filmFit=1,
overscan=1
)
@classmethod
def setCameraView(cls, nodepathString=None, optionKwargs=None):
if nodepathString is None:
shapePath = cls.getActiveCameraShape()
else:
shapePath = cls._dcc_getNodShapeNodepathStr(nodepathString)
#
if optionKwargs is None:
optionKwargs = cls.MaDefCameraOptionKwargs.copy()
#
cmds.camera(
shapePath,
edit=1,
**optionKwargs
)
#
cmds.setAttr(
shapePath + '.displayGateMaskOpacity',
cls.MaDefDisplayGateMaskOpacity
)
cmds.setAttr(
shapePath + '.displayGateMaskColor',
*cls.MaDefDisplayGateMaskColor, type='double3'
)
@staticmethod
def getActiveCameraShape():
cameraView, | |
# repo: victorcesc/sistema-gerador-avaliacoes
import random
from flask import render_template, flash, request
from flask_bootstrap import Bootstrap
from flask_nav import Nav
from flask_nav.elements import Navbar, View, Subgroup, Link
from werkzeug.utils import redirect
from forms import *
from models import *
boostrap = Bootstrap(app)  # enables the bootstrap/base.html template
nav = Nav()
nav.init_app(app)  # enables building navigation menus via the Flask-Nav package
@nav.navigation()
def meunavbar():
    """Build the navigation bar; entries depend on whether a user is logged in."""
    items = [View('Home', 'inicio')]
    if session.get('logged_in') is True:
        items.extend([
            Subgroup('Disciplina', View('Cadastro de Disciplina', 'cadastroDisciplina'), View('Listar Disciplina', 'listarDisciplina')),
            Subgroup('Assunto', View('Cadastro de Assuntos', 'cadastroAssunto'), View('Listar Assuntos', 'listarAssunto')),
            Subgroup('Questoes', View('Cadastro de Questoes', 'cadastroQuestao'), View('Listar Questoes', 'listarQuestao')),
            Subgroup('Alternativas', View('Cadastro de Alternativas', 'cadastroAlternativa'), View('Listar Alternativas', 'listarAlternativa')),
            Subgroup('Avaliacoes', View('Cadastro de Avaliacoes', 'cadastroAvaliacao'), View('Listar Avaliacoes', 'listarAvaliacao')),
            View('Sair', 'sair'),
        ])
    else:
        items.extend([
            View('Login', 'autenticar'),
            View('Registrar', 'cadastro'),
            Link('Ajuda', 'https://www.google.com'),
        ])
    menu = Navbar('Minha aplicação')
    menu.items = items
    return menu
@app.route('/registro', methods=['GET', 'POST'])
def cadastro():
    """Register a new user, rejecting duplicate usernames."""
    form = CadastroForm()
    if form.validate_on_submit():
        usuarioLogin = form.username.data
        usuarioPassword = form.password.data
        # `is not None` instead of `!= None` (PEP 8 identity comparison)
        if Usuario.query.filter_by(login=usuarioLogin).first() is not None:
            flash("O username {} já existe, digite outro".format(usuarioLogin), 'error')
            # BUG FIX: the duplicate branch previously fell through and the
            # view returned None, which Flask turns into a 500 error.
            return render_template('registro.html', title='Cadastro de usuário', form=form)
        else:
            # NOTE(review): senha appears to be stored as given — confirm the
            # Usuario model hashes it.
            novo_usuario = Usuario(login=usuarioLogin, senha=usuarioPassword)
            db.session.add(novo_usuario)
            db.session.commit()
            flash('Usuário {} criado com sucesso'.format(usuarioLogin))
            return render_template('index.html', title="Usuário registrado")
    else:
        return render_template('registro.html', title='Cadastro de usuário', form=form)
@app.route('/login', methods=['GET', 'POST'])
def autenticar():
    """Authenticate a user and store the session flags on success."""
    form = LoginForm()
    if not form.validate_on_submit():
        return render_template('login.html', title='Autenticação de usuários', form=form)
    # See: http://flask-sqlalchemy.pocoo.org/2.3/queries/#querying-records
    usuario = Usuario.query.filter_by(login=form.username.data).first_or_404()
    if usuario.check_password(form.password.data):
        session['logged_in'] = True
        session['idUsuario'] = usuario.get_idUsuario()
        flash('Bem vindo {}'.format(usuario.login))
        return render_template('autenticado.html', title="Usuário autenticado",usuario=usuario.get_idUsuario())
    flash('Usuário ou senha inválidos')
    return render_template('login.html', title='Autenticação de usuários', form=form)
# @app.route('/disciplina')
# def cadastrarDisciplina():
@app.route('/cadastroDisciplina',methods=['GET','POST'])
def cadastroDisciplina():
    """Create a discipline for the logged-in user, rejecting duplicates."""
    form = CadastroDisciplinaForm()
    if form.validate_on_submit():
        nomeDisciplina = form.nomeDisciplina.data
        # `is not None` instead of `!= None` (PEP 8 identity comparison)
        if Disciplina.query.filter_by(nomeDisciplina=nomeDisciplina,idUsuario=session.get('idUsuario'),statusDisc='ativo').first() is not None:
            flash('A disciplina {} ja existe, digite outra'.format(nomeDisciplina),'error')
            return render_template('registro.html',title='Cadastro Disciplina',form=form)
        else:
            # NOTE(review): idUsuario is not passed here although the duplicate
            # check filters by it — confirm the model fills it in by default.
            nova_disciplina = Disciplina(nomeDisciplina=nomeDisciplina)
            db.session.add(nova_disciplina)
            db.session.commit()
            flash('Disciplina {} criada com sucesso'.format(nomeDisciplina))
            return render_template('registro.html',title='Cadastro Disciplina',form=form)
    return render_template('registro.html', title='Cadastro de disciplina', form=form)
@app.route('/listarDisciplina',methods=['GET','POST'])
def listarDisciplina():
    """List the user's active disciplines; handles delete and edit buttons."""
    listaD = Disciplina.query.filter_by(idUsuario=session.get('idUsuario'), statusDisc='ativo').all()
    # FIX: the comprehension variable used to shadow the Disciplina model class
    listadF = [(d.idDisciplina, d.nomeDisciplina) for d in listaD]
    form = ListarDisciplinasForm()
    form.disciplinas.choices = listadF
    if form.disciplinas.data is not None:
        if form.submitExcluir.data:
            idDisciplina = form.disciplinas.data
            disciplina = Disciplina.query.filter_by(idDisciplina=idDisciplina, statusDisc='ativo').first()
            # soft delete: the row is flagged inactive, not removed
            disciplina.desativarDisciplina()
            db.session.commit()
            flash('Disciplina excluida com sucesso')
            return render_template('listarDisciplina.html', title='Listar Disciplinas', form=form)
        if form.submitEditar.data:
            idDisciplina = form.disciplinas.data
            return redirect(url_for('editarDisciplina', idDisciplina=idDisciplina))
    return render_template('listarDisciplina.html', title='Listar Disciplinas', form=form)
@app.route('/editarDisciplina', methods=['GET', 'POST'])
def editarDisciplina():
    """Rename a discipline; GET shows the form, POST applies the change."""
    if session.get('logged_in') is False:
        return inicio()
    idDisciplina = str(request.args.get('idDisciplina'))
    disciplina = Disciplina.query.filter_by(idDisciplina=idDisciplina).first()
    if disciplina is None:
        flash('Selecione uma disciplina')
        return redirect(url_for('listarDisciplina'))
    form = EditarDisciplinaForm(nomeDisciplina=disciplina.get_nomeDisciplina())
    if request.method == 'GET':
        # only the owning user may edit
        if int(disciplina.get_idUsuario()) != session['idUsuario']:
            return inicio()
        # BUG FIX: an unreachable duplicate None check here redirected to the
        # nonexistent endpoint 'listarDisciplinas' (BuildError); removed.
        return render_template('editar.html', title='Editar disciplina', form=form, disciplina=disciplina)
    else:
        novaDisciplina = form.novaDisciplina.data
        if novaDisciplina == disciplina.get_nomeDisciplina():
            flash("Digite um nome diferente pra disciplina", 'error')
            return render_template('editar.html', title='Editar disciplina', form=form, disciplina=disciplina)
        else:
            if Disciplina.query.filter_by(nomeDisciplina=novaDisciplina,statusDisc='ativo',
                                          idUsuario=session.get('idUsuario')).first() is not None:
                flash("A disciplina {} já existe".format(novaDisciplina), 'error')
                return render_template('editar.html', title='Editar disciplina', form=form, disciplina=disciplina)
            disciplina.set_nomeDisciplina(novaDisciplina)
            db.session.commit()
            flash("Disciplina alterada com sucesso!")
            return render_template('editar.html', title='Editar disciplina', form=form, disciplina=disciplina)
@app.route('/cadastroAssunto',methods=['GET', 'POST'])
def cadastroAssunto():
    """Create a subject (Assunto) under one of the user's disciplines."""
    listaD = Disciplina.query.filter_by(idUsuario=session.get('idUsuario'), statusDisc='ativo').all()
    # FIX: comprehension variable no longer shadows the Disciplina model class
    listadF = [(d.idDisciplina, d.nomeDisciplina) for d in listaD]
    form = CadastroAssuntoForm()
    form.disciplinas.choices = listadF
    if form.validate_on_submit():
        nomeAssunto = form.nomeAssunto.data
        idDisciplina = form.disciplinas.data
        if Assunto.query.filter_by(nomeAssunto=nomeAssunto,statusAss='ativo',idDisciplina=idDisciplina).first() is not None:
            flash('O assunto {} para a disciplina selecionada ja existe, digite outro'.format(nomeAssunto), 'error')
            return render_template('cadastroAssunto.html',title='Cadastro de Assunto',disciplinas=listaD,form=form)
        else:
            novo_assunto = Assunto(nomeAssunto=nomeAssunto,idDisciplina=idDisciplina)
            db.session.add(novo_assunto)
            db.session.commit()
            flash('Assunto {} criado com sucesso'.format(form.nomeAssunto.data))
            return render_template('cadastroAssunto.html', title='Cadastro de Assunto', disciplinas=listaD, form=form)
    return render_template('cadastroAssunto.html', title='Cadastro de Assunto', disciplinas=listaD,form=form)
@app.route('/listarAssunto',methods=['GET','POST'])
def listarAssunto():
    """List subjects of the selected discipline; handles delete and edit."""
    listaD = Disciplina.query.filter_by(idUsuario=session.get('idUsuario'), statusDisc='ativo').all()
    listadF = [(d.idDisciplina, d.nomeDisciplina) for d in listaD]
    form = ListarAssuntoForm()
    form.disciplinas.choices = listadF
    idDisciplina = form.disciplinas.data
    listaA = Assunto.query.filter_by(idDisciplina=idDisciplina, statusAss='ativo').all()
    listaAf = [(a.idAssunto, a.nomeAssunto) for a in listaA]
    form.assuntos.choices = listaAf
    if form.validate_on_submit():
        # NOTE(review): `choices` was just assigned a list, so these None
        # guards are always true; `form.assuntos.data` is the useful check.
        if form.submit2.data and form.assuntos.choices is not None:
            idAssunto = form.assuntos.data
            assunto = Assunto.query.filter_by(idAssunto=idAssunto,statusAss='ativo').first()
            # soft delete: the row is flagged inactive, not removed
            assunto.desativarAssunto()
            db.session.commit()
            flash("Assunto excluído com sucesso!")
            return redirect(url_for('listarAssunto'))
        if form.submit3.data and form.assuntos.choices is not None:
            idAssunto = form.assuntos.data
            return redirect(url_for('editarAssunto',idAssunto=idAssunto))
    return render_template('listarAssunto.html',nomeColuna= 'Assuntos',title='Listar Assuntos',form=form)
@app.route('/editarAssunto',methods=['GET','POST'])
def editarAssunto():
    """Rename a subject; GET shows the form, POST applies the change."""
    if session.get('logged_in') is False:
        return inicio()
    idAssunto = str(request.args.get('idAssunto'))
    assunto = Assunto.query.filter_by(idAssunto=idAssunto).first()
    # BUG FIX: guard before dereferencing — the original built the form first
    # and crashed with AttributeError on an unknown id; it also redirected to
    # the nonexistent endpoint 'listarAssuntos'.
    if assunto is None:
        return redirect(url_for('listarAssunto'))
    form = EditarAssuntoForm(nomeAssunto=assunto.get_nomeAssunto())
    if request.method == 'GET':
        return render_template('editarAssunto.html', title='Editar assunto', form=form)
    else:
        novoAssunto = form.novoAssunto.data
        if novoAssunto == assunto.get_nomeAssunto():
            flash("Digite um nome diferente pro Assunto", 'error')
            return render_template('editarAssunto.html',title='Editar Assunto',form=form)
        else:
            if Assunto.query.filter_by(nomeAssunto=novoAssunto,statusAss='ativo',idDisciplina=assunto.idDisciplina).first() is not None:
                flash("O assunto {} já existe".format(novoAssunto), 'error')
                return render_template('editarAssunto.html', title='Editar Assunto', form=form)
            assunto.set_nomeAssunto(novoAssunto)
            db.session.commit()
            flash("Assunto alterado com sucesso!")
            return render_template('editarAssunto.html', title='Editar Assunto', form=form)
@app.route('/cadastroQuestao',methods=['GET','POST'])
def cadastroQuestao():
    """Create a question under the selected discipline/subject."""
    listaD = Disciplina.query.filter_by(idUsuario=session.get('idUsuario'), statusDisc='ativo').all()
    listadF = [(d.idDisciplina, d.nomeDisciplina) for d in listaD]
    form = CadastroQuestaoForm()
    form.disciplinas.choices = listadF
    idDisciplina = form.disciplinas.data
    listaA = Assunto.query.filter_by(idDisciplina=idDisciplina, statusAss='ativo').all()
    listaAf = [(a.idAssunto, a.nomeAssunto) for a in listaA]
    form.assuntos.choices = listaAf
    idAssunto = form.assuntos.data
    if form.submitCadastro.data:
        if form.pergunta.data != '' and form.resposta.data != '' and idAssunto is not None:
            pergunta = form.pergunta.data
            resposta = form.resposta.data
            tipo = form.tipo.data
            if Questao.query.filter_by(idAssunto=idAssunto,pergunta=pergunta,resposta=resposta,tipo=tipo,statusQ='ativo').first() is not None:
                flash('A questao {} ja existe'.format(pergunta))
                return render_template('cadastroQuestao.html',title='Cadastro de Questao',form=form)
            else:
                nova_questao = Questao(idAssunto=idAssunto,pergunta=pergunta,resposta=resposta,tipo=tipo,vezesUsada=0,statusQ='ativo')
                db.session.add(nova_questao)
                db.session.commit()
                flash('Questao " {} " cadastrada com sucesso'.format(nova_questao.get_pergunta()))
                return render_template('cadastroQuestao.html',title='Cadastro de Questao',form=form)
        else:
            flash('Preencha todos os campos!')
            return render_template('cadastroQuestao.html',title='Cadastro de Questao',form=form)
    return render_template('cadastroQuestao.html',title='Cadastro de Questao',form=form)
@app.route('/listarQuestao',methods=['GET','POST'])
def listarQuestao():
    """List active questions of the selected subject; handles delete and edit."""
    listaD = Disciplina.query.filter_by(idUsuario=session.get('idUsuario'), statusDisc='ativo').all()
    listadF = [(d.idDisciplina, d.nomeDisciplina) for d in listaD]
    form = ListarQuestaoForm()
    form.disciplinas.choices = listadF
    idDisciplina = form.disciplinas.data
    listaA = Assunto.query.filter_by(idDisciplina=idDisciplina, statusAss='ativo').all()
    listaAf = [(a.idAssunto, a.nomeAssunto) for a in listaA]
    form.assuntos.choices = listaAf
    idAssunto = form.assuntos.data
    listaQ = Questao.query.filter_by(idAssunto=idAssunto,statusQ='ativo').all()
    listaQf = [(q.idQuestao, q.pergunta) for q in listaQ]
    form.questoes.choices = listaQf
    if form.questoes.data is not None:
        if form.submitExcluir.data:
            idQuestao = form.questoes.data
            questao = Questao.query.filter_by(idQuestao=idQuestao, statusQ='ativo').first()
            # soft delete: the row is flagged inactive, not removed
            questao.desativarQuestao()
            db.session.commit()
            flash('Questao excluida com sucesso')
            return render_template('listarQuestao.html', title='Listar Questoes', form=form)
        if form.submitEditar.data:
            idQuestao = form.questoes.data
            return redirect(url_for('editarQuestao', idQuestao=idQuestao))
    return render_template('listarQuestao.html', title='Listar Questoes', form=form)
@app.route('/editarQuestao',methods=['GET','POST'])
def editarQuestao():
    """Edit a question's text, answer and type."""
    idQuestao = str(request.args.get('idQuestao'))
    questao = Questao.query.filter_by(idQuestao=idQuestao).first()
    # BUG FIX: guard before dereferencing — the original called get_pergunta()
    # before the None check and crashed with AttributeError on an unknown id.
    if questao is None:
        return redirect(url_for('listarQuestao'))
    form = EditarQuestaoForm(pergunta= questao.get_pergunta(),tipo=questao.get_tipo())
    if request.method == 'GET':
        return render_template('editarQuestao.html', title='Editar Questao', form=form)
    else:
        novaPergunta = form.nova_pergunta.data
        novaResposta = form.nova_resposta.data
        tipo = form.novo_tipo.data
        if novaPergunta == questao.get_pergunta() and novaResposta == questao.get_resposta():
            flash("Digite um nome diferente para a Questao", 'error')
            return render_template('editarQuestao.html', title='Editar Questao', form=form)
        else:
            if Questao.query.filter_by(pergunta=novaPergunta,resposta=novaResposta, statusQ='ativo',
                                       idAssunto=questao.idAssunto).first() is not None:
                flash('A questao "{}" já existe'.format(novaPergunta), 'error')
                return render_template('editarQuestao.html', title='Editar Questao', form=form)
            questao.set_pergunta(novaPergunta)
            questao.set_resposta(novaResposta)
            questao.set_tipo(tipo)
            db.session.commit()
            flash("Questao alterada com sucesso!")
            return render_template('editarQuestao.html',form=form)
@app.route('/cadastroAlternativa',methods=['GET','POST'])
def cadastroAlternativa():
    """Create an alternative for a multiple-choice question."""
    listaD = Disciplina.query.filter_by(idUsuario=session.get('idUsuario'), statusDisc='ativo').all()
    listadF = [(d.idDisciplina, d.nomeDisciplina) for d in listaD]
    form = CadastroAlternativaForm()
    form.disciplinas.choices = listadF
    idDisciplina = form.disciplinas.data
    listaA = Assunto.query.filter_by(idDisciplina=idDisciplina, statusAss='ativo').all()
    listaAf = [(a.idAssunto, a.nomeAssunto) for a in listaA]
    form.assuntos.choices = listaAf
    idAssunto = form.assuntos.data
    # only multiple-choice questions can take alternatives
    listaQ = Questao.query.filter_by(idAssunto=idAssunto,tipo='Multipla Escolha', statusQ='ativo').all()
    listaQf = [(q.idQuestao, q.pergunta) for q in listaQ]
    form.questoes.choices = listaQf
    idQuestao = form.questoes.data
    if form.submitCadastro.data:
        if form.disciplinas.data is not None and form.questoes.data is not None:
            alternativa = form.alternativa.data
            if Alternativas.query.filter_by(idAssunto=idAssunto,idQuestao=idQuestao, alternativa=alternativa,
                                            statusAl='ativo').first() is not None:
                flash('A alternativa {} ja existe'.format(alternativa))
                return render_template('cadastroAlternativa.html', title='Cadastro de Alternativas', form=form)
            else:
                nova_alternativa = Alternativas(idAssunto=idAssunto,idQuestao=idQuestao,alternativa=alternativa, statusAl='ativo')
                db.session.add(nova_alternativa)
                db.session.commit()
                flash('Alternativa " {} " cadastrada com sucesso'.format(nova_alternativa.get_alternativa()))
                return render_template('cadastroAlternativa.html', title='Cadastro de Alternativas', form=form)
        else:
            flash('Preencha todos os campos!')
            return render_template('cadastroAlternativa.html', title='Cadastro de Alternativas', form=form)
    return render_template('cadastroAlternativa.html',title='Cadastro de Alternativas',form=form)
@app.route('/listarAlternativa', methods=['GET','POST'])
def listarAlternativa():
    """List alternatives of the selected question; handles delete and edit."""
    listaD = Disciplina.query.filter_by(idUsuario=session.get('idUsuario'), statusDisc='ativo').all()
    listadF = [(d.idDisciplina, d.nomeDisciplina) for d in listaD]
    form = ListarAlternativaForm()
    form.disciplinas.choices = listadF
    idDisciplina = form.disciplinas.data
    listaA = Assunto.query.filter_by(idDisciplina=idDisciplina, statusAss='ativo').all()
    listaAf = [(a.idAssunto, a.nomeAssunto) for a in listaA]
    form.assuntos.choices = listaAf
    idAssunto = form.assuntos.data
    listaQ = Questao.query.filter_by(idAssunto=idAssunto, tipo='Multipla Escolha', statusQ='ativo').all()
    listaQf = [(q.idQuestao, q.pergunta) for q in listaQ]
    form.questoes.choices = listaQf
    idQuestao = form.questoes.data
    listaAl = Alternativas.query.filter_by(idAssunto=idAssunto,idQuestao=idQuestao,statusAl='ativo').all()
    listaAlf = [(al.idAlternativas, al.alternativa) for al in listaAl]
    form.alternativas.choices = listaAlf
    if form.alternativas.data is not None:
        if form.submitExcluir.data:
            idAlternativas = form.alternativas.data
            alternativa = Alternativas.query.filter_by(idAlternativas=idAlternativas, statusAl='ativo').first()
            # soft delete: the row is flagged inactive, not removed
            alternativa.desativarAlternativa()
            db.session.commit()
            flash('Alternativa excluida com sucesso')
            return render_template('listarAlternativa.html', title='Listar Alternativas', form=form)
        if form.submitEditar.data:
            idAlternativas = form.alternativas.data
            return redirect(url_for('editarAlternativa', idAlternativas=idAlternativas))
    return render_template('listarAlternativa.html', title='Listar Alternativa', form=form)
@app.route('/editarAlternativa',methods=['GET','POST'])
def editarAlternativa():
    """Edit an alternative's text."""
    idAlternativas = str(request.args.get('idAlternativas'))
    alternativa = Alternativas.query.filter_by(idAlternativas=idAlternativas,statusAl='ativo').first()
    # BUG FIX: guard before dereferencing — the original built the form first
    # and crashed with AttributeError on an unknown id.
    if alternativa is None:
        return redirect(url_for('listarAlternativa'))
    form = EditarAlternativaForm(alternativa=alternativa.get_alternativa())
    if request.method == 'GET':
        return render_template('editarAlternativa.html', title='Editar Alternativa', form=form)
    else:
        novaAlternativa = form.nova_alternativa.data
        if novaAlternativa == alternativa.get_alternativa():
            flash("Digite um nome diferente pra Alternativa", 'error')
            return render_template('editarAlternativa.html', title='Editar Alternativa', form=form)
        else:
            # BUG FIX: the getters were referenced without parentheses, so the
            # filter compared against str(<bound method>) and the duplicate
            # check never matched.
            if Alternativas.query.filter_by(alternativa=novaAlternativa, idQuestao=str(alternativa.get_idQuestao()), idAssunto=str(alternativa.get_idAssunto()), statusAl='ativo').first() is not None:
                flash('A alternativa "{}" já existe'.format(novaAlternativa), 'error')
                return render_template('editarAlternativa.html', title='Editar Alternativa', form=form)
            alternativa.set_alternativa(novaAlternativa)
            db.session.commit()
            flash("Alternativa alterada com sucesso!")
            return redirect(url_for('listarAlternativa'))
@app.route('/cadastroAvaliacao',methods=['GET','POST'])
def cadastroAvaliacao():
listaD = Disciplina.query.filter_by(idUsuario=session.get('idUsuario'), statusDisc='ativo').all()
listadF = [(Disciplina.idDisciplina, Disciplina.nomeDisciplina) for Disciplina in listaD]
form = CadastroAvaliacaoForm()
form.disciplinas.choices = listadF
idDisciplina = form.disciplinas.data
listaA = Assunto.query.filter_by(idDisciplina=idDisciplina,statusAss='ativo').all()
listaAf = [(Assunto.idAssunto) for Assunto in listaA]
if form.is_submitted():
if form.nomeAvaliacao.data!='' and form.semestre.data!='' and form.ano.data!='' and form.numerodeQuestoes!='' and form.vezesUsadas.data != '' and form.disciplinas.data!=None and len(listaAf)!=0:
semestre = form.semestre.data
| |
is None
assert response_json[f"field_{field_id}"][1]["name"] == user_file_2.name
assert response_json[f"field_{field_id}"][1]["visible_name"] == "new_name_1.txt"
response = api_client.patch(
reverse(
"api:database:rows:item",
kwargs={"table_id": table.id, "row_id": response_json["id"]},
),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
response = api_client.get(
reverse(
"api:database:rows:item",
kwargs={"table_id": table.id, "row_id": response_json["id"]},
),
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
response_json = response.json()
assert response_json[f"field_{field_id}"][0]["name"] == user_file_3.name
assert (
response_json[f"field_{field_id}"][0]["visible_name"]
== user_file_3.original_name
)
assert "localhost:8000" in response_json[f"field_{field_id}"][0]["url"]
assert response_json[f"field_{field_id}"][1]["name"] == user_file_2.name
assert response_json[f"field_{field_id}"][1]["visible_name"] == "new_name_1.txt"
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
response_json = response.json()
assert len(response_json["results"]) == 3
assert response_json["results"][0][f"field_{field_id}"] == []
assert response_json["results"][1][f"field_{field_id}"] == []
assert (
response_json["results"][2][f"field_{field_id}"][0]["name"] == user_file_3.name
)
assert (
"localhost:8000" in response_json["results"][2][f"field_{field_id}"][0]["url"]
)
assert (
response_json["results"][2][f"field_{field_id}"][1]["name"] == user_file_2.name
)
# We also need to check if the grid view returns the correct url because the
# request context must be provided there in order to work.
url = reverse("api:database:views:grid:list", kwargs={"view_id": grid.id})
response = api_client.get(url, **{"HTTP_AUTHORIZATION": f"JWT {token}"})
assert response.status_code == HTTP_200_OK
response_json = response.json()
assert len(response_json["results"]) == 3
assert response_json["results"][0][f"field_{field_id}"] == []
assert response_json["results"][1][f"field_{field_id}"] == []
assert (
response_json["results"][2][f"field_{field_id}"][0]["name"] == user_file_3.name
)
assert (
"localhost:8000" in response_json["results"][2][f"field_{field_id}"][0]["url"]
)
assert (
response_json["results"][2][f"field_{field_id}"][1]["name"] == user_file_2.name
)
@pytest.mark.django_db
def test_number_field_type(api_client, data_fixture):
    """End-to-end API test of the ``number`` field type.

    Creates the four number-field flavours (positive/negative x
    integer/decimal), renames one, then posts valid values, NULLs and
    out-of-range integers, checking both the API responses and the
    values stored on the generated table model.
    """
    user, token = data_fixture.create_user_and_token(
        email="<EMAIL>", password="password", first_name="Test1"
    )
    table = data_fixture.create_database_table(user=user)
    # Create a positive integer field
    response = api_client.post(
        reverse("api:database:fields:list", kwargs={"table_id": table.id}),
        {
            "name": "PositiveInt",
            "type": "number",
            "number_type": "INTEGER",
            "number_negative": False,
        },
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    # Make sure the field was created properly
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json["type"] == "number"
    assert NumberField.objects.all().count() == 1
    positive_int_field_id = response_json["id"]
    # Create a negative integer field
    response = api_client.post(
        reverse("api:database:fields:list", kwargs={"table_id": table.id}),
        {
            "name": "NegativeInt",
            "type": "number",
            "number_type": "INTEGER",
            "number_negative": True,
        },
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    # Make sure the field was created properly
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json["type"] == "number"
    assert NumberField.objects.all().count() == 2
    negative_int_field_id = response_json["id"]
    # Create a positive decimal field
    response = api_client.post(
        reverse("api:database:fields:list", kwargs={"table_id": table.id}),
        {
            "name": "PositiveDecimal",
            "type": "number",
            "number_type": "DECIMAL",
            "number_negative": False,
            "number_decimal_places": 2,
        },
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    # Make sure the field was created properly
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json["type"] == "number"
    assert NumberField.objects.all().count() == 3
    positive_decimal_field_id = response_json["id"]
    # Create a negative decimal field
    response = api_client.post(
        reverse("api:database:fields:list", kwargs={"table_id": table.id}),
        {
            "name": "NegativeDecimal",
            "type": "number",
            "number_type": "DECIMAL",
            "number_negative": True,
            "number_decimal_places": 2,
        },
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    # Make sure the field was created properly
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json["type"] == "number"
    assert NumberField.objects.all().count() == 4
    negative_decimal_field_id = response_json["id"]
    # Test re-writing the name of a field. 'PositiveInt' is now called
    # 'PositiveIntEdit'
    response = api_client.patch(
        reverse("api:database:fields:item", kwargs={"field_id": positive_int_field_id}),
        {"name": "PositiveIntEdit"},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    assert response.status_code == HTTP_200_OK
    # Add a row with correct values
    valid_pos_int = "99999999999999999999999999999999999999999999999999"
    valid_neg_int = "-99999999999999999999999999999999999999999999999999"
    response = api_client.post(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        {
            f"field_{positive_int_field_id}": valid_pos_int,
            f"field_{negative_int_field_id}": valid_neg_int,
            f"field_{positive_decimal_field_id}": 1000.00,
            f"field_{negative_decimal_field_id}": -1000.00,
        },
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json[f"field_{positive_int_field_id}"] == valid_pos_int
    assert response_json[f"field_{negative_int_field_id}"] == valid_neg_int
    assert response_json[f"field_{positive_decimal_field_id}"] == "1000.00"
    assert response_json[f"field_{negative_decimal_field_id}"] == "-1000.00"
    model = table.get_model(attribute_names=True)
    row = model.objects.all().last()
    assert row.positiveintedit == Decimal(valid_pos_int)
    assert row.negativeint == Decimal(valid_neg_int)
    assert row.positivedecimal == Decimal(1000.00)
    assert row.negativedecimal == Decimal(-1000.00)
    # Add a row with Nones'
    response = api_client.post(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        {
            f"field_{positive_int_field_id}": None,
            f"field_{negative_int_field_id}": None,
            f"field_{positive_decimal_field_id}": None,
            f"field_{negative_decimal_field_id}": None,
        },
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json[f"field_{positive_int_field_id}"] is None
    assert response_json[f"field_{negative_int_field_id}"] is None
    assert response_json[f"field_{positive_decimal_field_id}"] is None
    assert response_json[f"field_{negative_decimal_field_id}"] is None
    row = model.objects.all().last()
    assert row.positiveintedit is None
    assert row.negativeint is None
    assert row.positivedecimal is None
    assert row.negativedecimal is None
    # Add a row with an integer that's too big
    invalid_pos_int = "999999999999999999999999999999999999999999999999999"
    response = api_client.post(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        {
            f"field_{positive_int_field_id}": invalid_pos_int,
        },
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    response_json = response.json()
    assert response.status_code == HTTP_400_BAD_REQUEST
    assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
    assert (
        response_json["detail"][f"field_{positive_int_field_id}"][0]["code"]
        == "max_digits"
    )
    # Add a row with an integer that's too small
    invalid_neg_int = "-9999999999999999999999999999999999999999999999999999"
    response = api_client.post(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        {
            f"field_{negative_int_field_id}": invalid_neg_int,
        },
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    # Bug fix: the original never re-read this response's body (it kept
    # asserting the stale response_json from the too-big-int request) and
    # checked the wrong field id in the validation detail.
    response_json = response.json()
    assert response.status_code == HTTP_400_BAD_REQUEST
    assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
    assert (
        response_json["detail"][f"field_{negative_int_field_id}"][0]["code"]
        == "max_digits"
    )
@pytest.mark.django_db
def test_phone_number_field_type(api_client, data_fixture):
    """End-to-end API test of the ``phone_number`` field type.

    Creates and renames a phone field, then verifies that a valid
    number round-trips, that empty string / None / an absent key all
    normalise to ``""``, and that deleting the field removes it.
    """
    user, token = data_fixture.create_user_and_token(
        email="<EMAIL>", password="password", first_name="Test1"
    )
    table = data_fixture.create_database_table(user=user)
    response = api_client.post(
        reverse("api:database:fields:list", kwargs={"table_id": table.id}),
        {"name": "phone", "type": "phone_number"},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json["type"] == "phone_number"
    assert PhoneNumberField.objects.all().count() == 1
    field_id = response_json["id"]
    # Renaming the field must succeed and not affect stored values.
    response = api_client.patch(
        reverse("api:database:fields:item", kwargs={"field_id": field_id}),
        {"name": "Phone"},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    assert response.status_code == HTTP_200_OK
    expected_phone_number = "+44761198672"
    response = api_client.post(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        {f"field_{field_id}": expected_phone_number},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json[f"field_{field_id}"] == expected_phone_number
    model = table.get_model(attribute_names=True)
    row = model.objects.all().last()
    assert row.phone == expected_phone_number
    # An explicit empty string is stored as-is.
    response = api_client.post(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        {f"field_{field_id}": ""},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json[f"field_{field_id}"] == ""
    row = model.objects.all().last()
    assert row.phone == ""
    # None is coerced to the empty string.
    response = api_client.post(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        {f"field_{field_id}": None},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json[f"field_{field_id}"] == ""
    row = model.objects.all().last()
    assert row.phone == ""
    # Omitting the key entirely also defaults to the empty string.
    response = api_client.post(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        {},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json[f"field_{field_id}"] == ""
    row = model.objects.all().last()
    assert row.phone == ""
    # Fix: the original named this URL variable ``email``, which is
    # misleading — it is the field-item endpoint used for deletion.
    field_url = reverse("api:database:fields:item", kwargs={"field_id": field_id})
    response = api_client.delete(field_url, HTTP_AUTHORIZATION=f"JWT {token}")
    assert response.status_code == HTTP_200_OK
    assert PhoneNumberField.objects.all().count() == 0
@pytest.mark.django_db
def test_last_modified_field_type(api_client, data_fixture):
    """End-to-end API test of the read-only ``last_modified`` field type.

    All writes happen under freeze_time at the same instant so the
    field value can be compared against the row's ``updated_on`` column,
    and direct writes to the field are expected to be rejected.
    """
    time_under_test = "2021-08-10 12:00"
    with freeze_time(time_under_test):
        user, token = data_fixture.create_user_and_token(
            email="<EMAIL>", password="password", first_name="Test1"
        )
        table = data_fixture.create_database_table(user=user)
    # first add text field so that there is already a row with an
    # updated_on value
    text_field = data_fixture.create_text_field(user=user, table=table)
    with freeze_time(time_under_test):
        api_client.post(
            reverse("api:database:rows:list", kwargs={"table_id": table.id}),
            {f"field_{text_field.id}": "Test Text"},
            format="json",
            HTTP_AUTHORIZATION=f"JWT {token}",
        )
    # now add a last_modified field with datetime
    with freeze_time(time_under_test):
        response = api_client.post(
            reverse("api:database:fields:list", kwargs={"table_id": table.id}),
            {
                "name": "Last",
                "type": "last_modified",
                "date_include_time": True,
                "timezone": "Europe/Berlin",
            },
            format="json",
            HTTP_AUTHORIZATION=f"JWT {token}",
        )
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json["type"] == "last_modified"
    assert LastModifiedField.objects.all().count() == 1
    last_modified_field_id = response_json["id"]
    assert last_modified_field_id
    # verify that the timestamp is the same as the updated_on column
    model = table.get_model(attribute_names=True)
    row = model.objects.all().last()
    assert row.last == row.updated_on
    # change the text_field value so that we can verify that the
    # last_modified column gets updated as well
    with freeze_time(time_under_test):
        response = api_client.patch(
            reverse(
                "api:database:rows:item",
                kwargs={"table_id": table.id, "row_id": row.id},
            ),
            {f"field_{text_field.id}": "test_second"},
            format="json",
            HTTP_AUTHORIZATION=f"JWT {token}",
        )
    response.json()
    assert response.status_code == HTTP_200_OK
    # NOTE(review): ``row`` is not re-fetched/refreshed after the PATCH,
    # so these attributes are the pre-update values; the comparison still
    # holds because everything ran under the same frozen timestamp —
    # confirm whether a refresh_from_db() was intended here.
    last_datetime = row.last
    updated_on_datetime = row.updated_on
    assert last_datetime == updated_on_datetime
    # The field is read-only: posting a date value must be rejected.
    with freeze_time(time_under_test):
        response = api_client.post(
            reverse("api:database:rows:list", kwargs={"table_id": table.id}),
            {
                f"field_{last_modified_field_id}": "2021-08-05",
            },
            format="json",
            HTTP_AUTHORIZATION=f"JWT {token}",
        )
    response_json = response.json()
    assert response.status_code == HTTP_400_BAD_REQUEST
    assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
    # Posting a full datetime must be rejected as well.
    with freeze_time(time_under_test):
        response = api_client.post(
            reverse("api:database:rows:list", kwargs={"table_id": table.id}),
            {
                f"field_{last_modified_field_id}": "2021-08-09T14:14:33.574356Z",
            },
            format="json",
            HTTP_AUTHORIZATION=f"JWT {token}",
        )
    response_json = response.json()
    assert response.status_code == HTTP_400_BAD_REQUEST
    assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
@pytest.mark.django_db
def test_created_on_field_type(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table = data_fixture.create_database_table(user=user)
# first add text field so that there is already a row with an
# updated_on and a created_on value
text_field = data_fixture.create_text_field(user=user, table=table)
api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{text_field.id}": "Test Text"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
# now add a created_on field with datetime
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "Create",
"type": "created_on",
"date_include_time": True,
"timezone": "Europe/Berlin",
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["type"] == "created_on"
assert CreatedOnField.objects.all().count() == 1
created_on_field_id = response_json["id"]
assert created_on_field_id
# verify that the timestamp is the same as the updated_on column
model = table.get_model(attribute_names=True)
row = model.objects.all().last()
assert row.create == row.created_on
# change the text_field value so that we can verify that the
# created_on column does NOT get updated
response = api_client.patch(
reverse(
"api:database:rows:item",
kwargs={"table_id": table.id, "row_id": row.id},
),
{f"field_{text_field.id}": "test_second"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response.json()
assert response.status_code == HTTP_200_OK
row = model.objects.all().last()
create_datetime = row.create
created_on_datetime = | |
# Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import argparse
import collections
import getpass
import mock
from mock import call
from novaclient import api_versions
from openstack import exceptions as sdk_exceptions
from osc_lib import exceptions
from osc_lib import utils as common_utils
from oslo_utils import timeutils
import six
from openstackclient.compute.v2 import server
from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes
from openstackclient.tests.unit.image.v2 import fakes as image_fakes
from openstackclient.tests.unit.network.v2 import fakes as network_fakes
from openstackclient.tests.unit import utils
from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes
class TestServer(compute_fakes.TestComputev2):
    """Shared fixture base for ``openstack server *`` command tests.

    Wires mock shortcuts to the compute/image/volume client managers and
    provides helpers to create fake servers and to run a command over a
    batch of servers, asserting a named method was called on each.
    """

    def setUp(self):
        super(TestServer, self).setUp()
        # Get a shortcut to the compute client ServerManager Mock
        self.servers_mock = self.app.client_manager.compute.servers
        self.servers_mock.reset_mock()
        # Get a shortcut to the compute client FlavorManager Mock
        self.flavors_mock = self.app.client_manager.compute.flavors
        self.flavors_mock.reset_mock()
        # Get a shortcut to the image client ImageManager Mock
        self.images_mock = self.app.client_manager.image.images
        self.images_mock.reset_mock()
        # Get a shortcut to the volume client VolumeManager Mock
        self.volumes_mock = self.app.client_manager.volume.volumes
        self.volumes_mock.reset_mock()
        # Get a shortcut to the volume client VolumeManager Mock
        self.snapshots_mock = self.app.client_manager.volume.volume_snapshots
        self.snapshots_mock.reset_mock()
        # Set object attributes to be tested. Could be overwritten in subclass.
        self.attrs = {}
        # Set object methods to be tested. Could be overwritten in subclass.
        self.methods = {}

    def setup_servers_mock(self, count):
        """Create ``count`` fake servers and wire ``servers_mock.get``.

        The fake servers carry ``self.attrs``/``self.methods``, so
        subclasses configure those before calling this helper.
        """
        servers = compute_fakes.FakeServer.create_servers(attrs=self.attrs,
                                                          methods=self.methods,
                                                          count=count)
        # This is the return value for utils.find_resource()
        self.servers_mock.get = compute_fakes.FakeServer.get_servers(servers,
                                                                     0)
        return servers

    def run_method_with_servers(self, method_name, server_count):
        """Run ``self.cmd`` on ``server_count`` fake servers.

        Asserts that ``method_name`` was called (with no arguments) on
        every server and that the command returned None.
        """
        servers = self.setup_servers_mock(server_count)
        arglist = []
        verifylist = []
        for s in servers:
            arglist.append(s.id)
        verifylist = [
            ('server', arglist),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        for s in servers:
            method = getattr(s, method_name)
            method.assert_called_with()
        self.assertIsNone(result)
class TestServerAddFixedIP(TestServer):
    """Tests for ``server add fixed ip`` (server.AddFixedIP)."""

    def setUp(self):
        super(TestServerAddFixedIP, self).setUp()
        # Fake servers must expose interface_attach so the command can
        # call it.
        self.methods = {
            'interface_attach': None,
        }
        # Command under test.
        self.cmd = server.AddFixedIP(self.app, None)

    def _test_server_add_fixed_ip(self, extralist, fixed_ip_address):
        """Run the command and verify the interface_attach call."""
        fake_server = self.setup_servers_mock(count=1)[0]
        fake_net = compute_fakes.FakeNetwork.create_one_network()
        with mock.patch(
            'openstackclient.api.compute_v2.APIv2.network_find'
        ) as net_mock:
            net_mock.return_value = fake_net
            cli_args = [fake_server.id, fake_net['id']] + extralist
            expected_opts = [
                ('server', fake_server.id),
                ('network', fake_net['id']),
                ('fixed_ip_address', fixed_ip_address),
            ]
            parsed_args = self.check_parser(self.cmd, cli_args, expected_opts)
            self.assertIsNone(self.cmd.take_action(parsed_args))
            fake_server.interface_attach.assert_called_once_with(
                port_id=None,
                net_id=fake_net['id'],
                fixed_ip=fixed_ip_address,
            )

    def test_server_add_fixed_ip(self):
        # No --fixed-ip-address option: the command picks any address.
        self._test_server_add_fixed_ip([], None)

    def test_server_add_specific_fixed_ip(self):
        # With an explicit --fixed-ip-address, it is passed through.
        self._test_server_add_fixed_ip(
            ['--fixed-ip-address', '192.168.127.12'], '192.168.127.12')
@mock.patch(
    'openstackclient.api.compute_v2.APIv2.floating_ip_add'
)
class TestServerAddFloatingIPCompute(compute_fakes.TestComputev2):
    """Tests for ``server add floating ip`` via the nova-network path.

    The class-level patch injects ``fip_mock`` (the patched
    ``floating_ip_add``) as an extra argument into every test method.
    """

    def setUp(self):
        super(TestServerAddFloatingIPCompute, self).setUp()
        # Force the compute fallback path (no neutron endpoint).
        self.app.client_manager.network_endpoint_enabled = False
        # Get the command object to test
        self.cmd = server.AddFloatingIP(self.app, None)

    def test_server_add_floating_ip_default(self, fip_mock):
        """Without --fixed-ip-address, fixed_address is passed as None."""
        _floating_ip = compute_fakes.FakeFloatingIP.create_one_floating_ip()
        arglist = [
            'server1',
            _floating_ip['ip'],
        ]
        verifylist = [
            ('server', 'server1'),
            ('ip_address', _floating_ip['ip']),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        fip_mock.assert_called_once_with(
            'server1',
            _floating_ip['ip'],
            fixed_address=None,
        )

    def test_server_add_floating_ip_fixed(self, fip_mock):
        """--fixed-ip-address is forwarded as fixed_address."""
        _floating_ip = compute_fakes.FakeFloatingIP.create_one_floating_ip()
        arglist = [
            '--fixed-ip-address', _floating_ip['fixed_ip'],
            'server1',
            _floating_ip['ip'],
        ]
        verifylist = [
            ('fixed_ip_address', _floating_ip['fixed_ip']),
            ('server', 'server1'),
            ('ip_address', _floating_ip['ip']),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        fip_mock.assert_called_once_with(
            'server1',
            _floating_ip['ip'],
            fixed_address=_floating_ip['fixed_ip'],
        )
class TestServerAddFloatingIPNetwork(
    TestServer,
    network_fakes.TestNetworkV2,
):
    """Tests for ``server add floating ip`` via the network (neutron) path.

    The network client is fully mocked; each test verifies which port
    the floating IP gets associated with through ``update_ip``.
    """

    def setUp(self):
        super(TestServerAddFloatingIPNetwork, self).setUp()
        self.app.client_manager.network = mock.Mock()
        self.network = self.app.client_manager.network
        self.network.update_ip = mock.Mock(return_value=None)
        # Get the command object to test
        self.cmd = server.AddFloatingIP(self.app, self.namespace)

    def test_server_add_floating_ip_default(self):
        """Happy path: the floating IP is bound to the server's first port."""
        _server = compute_fakes.FakeServer.create_one_server()
        self.servers_mock.get.return_value = _server
        _port = network_fakes.FakePort.create_one_port()
        _floating_ip = network_fakes.FakeFloatingIP.create_one_floating_ip()
        self.network.find_ip = mock.Mock(return_value=_floating_ip)
        self.network.ports = mock.Mock(return_value=[_port])
        arglist = [
            _server.id,
            _floating_ip['floating_ip_address'],
        ]
        verifylist = [
            ('server', _server.id),
            ('ip_address', _floating_ip['floating_ip_address']),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        attrs = {
            'port_id': _port.id,
        }
        self.network.find_ip.assert_called_once_with(
            _floating_ip['floating_ip_address'],
            ignore_missing=False,
        )
        self.network.ports.assert_called_once_with(
            device_id=_server.id,
        )
        self.network.update_ip.assert_called_once_with(
            _floating_ip,
            **attrs
        )

    def test_server_add_floating_ip_default_no_external_gateway(self,
                                                                success=False):
        """Ports without an external gateway raise NotFoundException.

        With ``success=False`` the only port fails and the exception
        propagates; with ``success=True`` a second port succeeds after
        the first fails, so update_ip is attempted twice.
        """
        _server = compute_fakes.FakeServer.create_one_server()
        self.servers_mock.get.return_value = _server
        _port = network_fakes.FakePort.create_one_port()
        _floating_ip = network_fakes.FakeFloatingIP.create_one_floating_ip()
        self.network.find_ip = mock.Mock(return_value=_floating_ip)
        return_value = [_port]
        # In the success case, we'll have two ports, where the first port is
        # not attached to an external gateway but the second port is.
        if success:
            return_value.append(_port)
        self.network.ports = mock.Mock(return_value=return_value)
        side_effect = [sdk_exceptions.NotFoundException()]
        if success:
            side_effect.append(None)
        self.network.update_ip = mock.Mock(side_effect=side_effect)
        arglist = [
            _server.id,
            _floating_ip['floating_ip_address'],
        ]
        verifylist = [
            ('server', _server.id),
            ('ip_address', _floating_ip['floating_ip_address']),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        if success:
            self.cmd.take_action(parsed_args)
        else:
            self.assertRaises(sdk_exceptions.NotFoundException,
                              self.cmd.take_action, parsed_args)
        attrs = {
            'port_id': _port.id,
        }
        self.network.find_ip.assert_called_once_with(
            _floating_ip['floating_ip_address'],
            ignore_missing=False,
        )
        self.network.ports.assert_called_once_with(
            device_id=_server.id,
        )
        if success:
            self.assertEqual(2, self.network.update_ip.call_count)
            calls = [mock.call(_floating_ip, **attrs)] * 2
            self.network.update_ip.assert_has_calls(calls)
        else:
            self.network.update_ip.assert_called_once_with(
                _floating_ip,
                **attrs
            )

    def test_server_add_floating_ip_default_one_external_gateway(self):
        """Re-run the gateway test in the two-port success mode."""
        self.test_server_add_floating_ip_default_no_external_gateway(
            success=True)

    def test_server_add_floating_ip_fixed(self):
        """--fixed-ip-address matching an attached port selects that port."""
        _server = compute_fakes.FakeServer.create_one_server()
        self.servers_mock.get.return_value = _server
        _port = network_fakes.FakePort.create_one_port()
        _floating_ip = network_fakes.FakeFloatingIP.create_one_floating_ip()
        self.network.find_ip = mock.Mock(return_value=_floating_ip)
        self.network.ports = mock.Mock(return_value=[_port])
        # The user has specified a fixed ip that matches one of the ports
        # already attached to the instance.
        arglist = [
            '--fixed-ip-address', _port.fixed_ips[0]['ip_address'],
            _server.id,
            _floating_ip['floating_ip_address'],
        ]
        verifylist = [
            ('fixed_ip_address', _port.fixed_ips[0]['ip_address']),
            ('server', _server.id),
            ('ip_address', _floating_ip['floating_ip_address']),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        # We expect the update_ip call to specify a new fixed_ip_address which
        # will overwrite the floating ip's existing fixed_ip_address.
        attrs = {
            'port_id': _port.id,
            'fixed_ip_address': _port.fixed_ips[0]['ip_address'],
        }
        self.network.find_ip.assert_called_once_with(
            _floating_ip['floating_ip_address'],
            ignore_missing=False,
        )
        self.network.ports.assert_called_once_with(
            device_id=_server.id,
        )
        self.network.update_ip.assert_called_once_with(
            _floating_ip,
            **attrs
        )

    def test_server_add_floating_ip_fixed_no_port_found(self):
        """--fixed-ip-address matching no port raises CommandError."""
        _server = compute_fakes.FakeServer.create_one_server()
        self.servers_mock.get.return_value = _server
        _port = network_fakes.FakePort.create_one_port()
        _floating_ip = network_fakes.FakeFloatingIP.create_one_floating_ip()
        self.network.find_ip = mock.Mock(return_value=_floating_ip)
        self.network.ports = mock.Mock(return_value=[_port])
        # The user has specified a fixed ip that does not match any of the
        # ports already attached to the instance.
        nonexistent_ip = '10.0.0.9'
        arglist = [
            '--fixed-ip-address', nonexistent_ip,
            _server.id,
            _floating_ip['floating_ip_address'],
        ]
        verifylist = [
            ('fixed_ip_address', nonexistent_ip),
            ('server', _server.id),
            ('ip_address', _floating_ip['floating_ip_address']),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.assertRaises(exceptions.CommandError, self.cmd.take_action,
                          parsed_args)
        self.network.find_ip.assert_called_once_with(
            _floating_ip['floating_ip_address'],
            ignore_missing=False,
        )
        self.network.ports.assert_called_once_with(
            device_id=_server.id,
        )
        self.network.update_ip.assert_not_called()
class TestServerAddPort(TestServer):
    """Tests for ``server add port`` (server.AddPort)."""

    def setUp(self):
        super(TestServerAddPort, self).setUp()
        # Fake servers must expose interface_attach so the command can
        # call it.
        self.methods = {
            'interface_attach': None,
        }
        self.find_port = mock.Mock()
        self.app.client_manager.network.find_port = self.find_port
        # Command under test.
        self.cmd = server.AddPort(self.app, None)

    def _test_server_add_port(self, port_id):
        """Run the command and verify interface_attach got ``port_id``."""
        fake_server = self.setup_servers_mock(count=1)[0]
        port_name = 'fake-port'
        parsed_args = self.check_parser(
            self.cmd,
            [fake_server.id, port_name],
            [('server', fake_server.id), ('port', port_name)],
        )
        self.assertIsNone(self.cmd.take_action(parsed_args))
        fake_server.interface_attach.assert_called_once_with(
            port_id=port_id, net_id=None, fixed_ip=None)

    def test_server_add_port(self):
        # With neutron available the port name is resolved first.
        self._test_server_add_port(self.find_port.return_value.id)
        self.find_port.assert_called_once_with(
            'fake-port', ignore_missing=False)

    def test_server_add_port_no_neutron(self):
        # Without neutron the raw port string is passed straight through.
        self.app.client_manager.network_endpoint_enabled = False
        self._test_server_add_port('fake-port')
        self.find_port.assert_not_called()
class TestServerAddNetwork(TestServer):
    """Tests for ``server add network`` (server.AddNetwork)."""

    def setUp(self):
        super(TestServerAddNetwork, self).setUp()
        # Fake servers must expose interface_attach so the command can
        # call it.
        self.methods = {
            'interface_attach': None,
        }
        self.find_network = mock.Mock()
        self.app.client_manager.network.find_network = self.find_network
        # Command under test.
        self.cmd = server.AddNetwork(self.app, None)

    def _test_server_add_network(self, net_id):
        """Run the command and verify interface_attach got ``net_id``."""
        fake_server = self.setup_servers_mock(count=1)[0]
        net_name = 'fake-network'
        parsed_args = self.check_parser(
            self.cmd,
            [fake_server.id, net_name],
            [('server', fake_server.id), ('network', net_name)],
        )
        self.assertIsNone(self.cmd.take_action(parsed_args))
        fake_server.interface_attach.assert_called_once_with(
            port_id=None, net_id=net_id, fixed_ip=None)

    def test_server_add_network(self):
        # With neutron available the network name is resolved first.
        self._test_server_add_network(self.find_network.return_value.id)
        self.find_network.assert_called_once_with(
            'fake-network', ignore_missing=False)

    def test_server_add_network_no_neutron(self):
        # Without neutron the raw network string is passed straight through.
        self.app.client_manager.network_endpoint_enabled = False
        self._test_server_add_network('fake-network')
        self.find_network.assert_not_called()
@mock.patch(
    'openstackclient.api.compute_v2.APIv2.security_group_find'
)
class TestServerAddSecurityGroup(TestServer):
    """Tests for ``server add security group``.

    The class-level patch injects ``sg_find_mock`` (the patched
    ``security_group_find``) into every test method.
    """

    def setUp(self):
        super(TestServerAddSecurityGroup, self).setUp()
        self.security_group = \
            compute_fakes.FakeSecurityGroup.create_one_security_group()
        attrs = {
            'security_groups': [{'name': self.security_group['id']}]
        }
        methods = {
            'add_security_group': None,
        }
        self.server = compute_fakes.FakeServer.create_one_server(
            attrs=attrs,
            methods=methods
        )
        # This is the return value for utils.find_resource() for server
        self.servers_mock.get.return_value = self.server
        # Get the command object to test
        self.cmd = server.AddServerSecurityGroup(self.app, None)

    def test_server_add_security_group(self, sg_find_mock):
        """The resolved group is added to the looked-up server."""
        sg_find_mock.return_value = self.security_group
        arglist = [
            self.server.id,
            self.security_group['id']
        ]
        verifylist = [
            ('server', self.server.id),
            ('group', self.security_group['id']),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        sg_find_mock.assert_called_with(
            self.security_group['id'],
        )
        self.servers_mock.get.assert_called_with(self.server.id)
        self.server.add_security_group.assert_called_with(
            self.security_group['id'],
        )
        self.assertIsNone(result)
class TestServerCreate(TestServer):
columns = (
'OS-EXT-STS:power_state',
'addresses',
'flavor',
'id',
'image',
'name',
'networks',
'properties',
)
def datalist(self):
datalist = (
server._format_servers_list_power_state(
getattr(self.new_server, 'OS-EXT-STS:power_state')),
'',
self.flavor.name + ' (' + self.new_server.flavor.get('id') + ')',
self.new_server.id,
self.image.name + ' (' + self.new_server.image.get('id') + ')',
self.new_server.name,
self.new_server.networks,
'',
)
return datalist
    def setUp(self):
        """Wires fakes for every resource `server create` may resolve."""
        super(TestServerCreate, self).setUp()
        # No addresses keeps the expected 'networks' column empty.
        attrs = {
            'networks': {},
        }
        self.new_server = compute_fakes.FakeServer.create_one_server(
            attrs=attrs)
        # This is the return value for utils.find_resource().
        # This is for testing --wait option.
        self.servers_mock.get.return_value = self.new_server
        self.servers_mock.create.return_value = self.new_server
        self.image = image_fakes.FakeImage.create_one_image()
        self.images_mock.get.return_value = self.image
        self.flavor = compute_fakes.FakeFlavor.create_one_flavor()
        self.flavors_mock.get.return_value = self.flavor
        self.volume = volume_fakes.FakeVolume.create_one_volume()
        self.volumes_mock.get.return_value = self.volume
        self.snapshot = volume_fakes.FakeSnapshot.create_one_snapshot()
        self.snapshots_mock.get.return_value = self.snapshot
        # Get the command object to test
        self.cmd = server.CreateServer(self.app, None)
    def test_server_create_no_options(self):
        """`server create` without --image/--flavor must fail at parse time."""
        arglist = [
            self.new_server.name,
        ]
        verifylist = [
            ('server_name', self.new_server.name),
        ]
        # Image and flavor are required, so the parser itself should
        # reject the command line before take_action() is ever reached.
        self.assertRaises(utils.ParserException, self.check_parser,
                          self.cmd, arglist, verifylist)
def test_server_create_minimal(self):
arglist = [
'--image', 'image1',
'--flavor', 'flavor1',
self.new_server.name,
]
verifylist = [
('image', 'image1'),
('flavor', 'flavor1'),
('config_drive', False),
| |
#!/usr/bin/env python
"""Tests the client file finder action."""
import collections
import glob
import hashlib
import os
import platform
import shutil
import subprocess
import unittest
import mock
import psutil
import unittest
from grr.client import comms
from grr.client.client_actions import file_finder as client_file_finder
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import crypto as rdf_crypto
from grr.lib.rdfvalues import file_finder as rdf_file_finder
from grr.lib.rdfvalues import standard as rdf_standard
from grr.test_lib import client_test_lib
from grr.test_lib import test_lib
def MyStat(path):
  """Stat wrapper that pins the timestamps of any ``auth.log`` file.

  Delegates to ``MyStat.old_target`` (installed by the test stubber) and,
  for paths ending in "auth.log", rewrites the last three stat fields
  (atime, mtime, ctime) to a fixed timestamp in late 2022 so that
  time-based conditions behave deterministically.
  """
  result = MyStat.old_target(path)
  if not path.endswith("auth.log"):
    return result
  fields = list(result)
  # The final three entries of a stat tuple are atime, mtime and ctime.
  fields[-3:] = [1672466423] * 3
  return os.stat_result(fields)
class FileFinderTest(client_test_lib.EmptyActionTest):
  def setUp(self):
    """Creates the default STAT action shared by most tests."""
    super(FileFinderTest, self).setUp()
    self.stat_action = rdf_file_finder.FileFinderAction.Stat()
def _GetRelativeResults(self, raw_results, base_path=None):
base_path = base_path or self.base_path
return [
result.stat_entry.pathspec.path[len(base_path) + 1:]
for result in raw_results
]
  def _RunFileFinder(self,
                     paths,
                     action,
                     conditions=None,
                     follow_links=True,
                     **kw):
    """Runs the client FileFinderOS action and returns its replies.

    process_non_regular_files is always enabled so tests can receive
    results for directories and symlinks, not just regular files.
    """
    return self.RunAction(
        client_file_finder.FileFinderOS,
        arg=rdf_file_finder.FileFinderArgs(
            paths=paths,
            action=action,
            conditions=conditions,
            process_non_regular_files=True,
            follow_links=follow_links,
            **kw))
  def testFileFinder(self):
    """A single-star glob lists a directory exactly like os.listdir."""
    paths = [self.base_path + "/*"]
    results = self._RunFileFinder(paths, self.stat_action)
    self.assertEqual(
        self._GetRelativeResults(results), os.listdir(self.base_path))
    # Same check one level deeper in the fixture tree.
    profiles_path = os.path.join(self.base_path, "profiles/v1.0")
    paths = [os.path.join(self.base_path, "profiles/v1.0") + "/*"]
    results = self._RunFileFinder(paths, self.stat_action)
    self.assertEqual(
        self._GetRelativeResults(results, base_path=profiles_path),
        os.listdir(profiles_path))
  def testRecursiveGlob(self):
    """`**N` descends at most N levels below the starting directory."""
    paths = [self.base_path + "/**3"]
    results = self._RunFileFinder(paths, self.stat_action)
    relative_results = self._GetRelativeResults(results)
    self.assertIn("a/b", relative_results)
    self.assertIn("a/b/c", relative_results)
    self.assertIn("a/b/d", relative_results)
    # Depth 3 must not reach files four levels down.
    self.assertNotIn("a/b/c/helloc.txt", relative_results)
    self.assertNotIn("a/b/d/hellod.txt", relative_results)
    paths = [self.base_path + "/**4"]
    results = self._RunFileFinder(paths, self.stat_action)
    relative_results = self._GetRelativeResults(results)
    self.assertIn("a/b", relative_results)
    self.assertIn("a/b/c", relative_results)
    self.assertIn("a/b/d", relative_results)
    # Depth 4 does reach them.
    self.assertIn("a/b/c/helloc.txt", relative_results)
    self.assertIn("a/b/d/hellod.txt", relative_results)
  def testRegexGlob(self):
    """Wildcard results agree with what the stdlib glob module finds."""
    paths = [self.base_path + "/rekall*.gz"]
    results = self._RunFileFinder(paths, self.stat_action)
    relative_results = self._GetRelativeResults(results)
    # Every file the stdlib finds must also be in the action's results.
    for glob_result in glob.glob(self.base_path + "/rekall*gz"):
      self.assertIn(os.path.basename(glob_result), relative_results)
  def testRecursiveRegexGlob(self):
    """A wildcard component after `**N` applies at every visited depth."""
    paths = [self.base_path + "/**3/*.gz"]
    results = self._RunFileFinder(paths, self.stat_action)
    relative_results = self._GetRelativeResults(results)
    self.assertIn("profiles/v1.0/nt/index.gz", relative_results)
    self.assertIn("bigquery/ExportedFile.json.gz", relative_results)
    for r in relative_results:
      self.assertEqual(os.path.splitext(r)[1], ".gz")
    # Depth 2 is too shallow to reach the nested profiles file.
    paths = [self.base_path + "/**2/*.gz"]
    results = self._RunFileFinder(paths, self.stat_action)
    relative_results = self._GetRelativeResults(results)
    self.assertNotIn("profiles/v1.0/nt/index.gz", relative_results)
    self.assertIn("bigquery/ExportedFile.json.gz", relative_results)
    for r in relative_results:
      self.assertEqual(os.path.splitext(r)[1], ".gz")
  def testDoubleRecursionFails(self):
    """Only one recursive (`**`) component is allowed per path."""
    paths = [self.base_path + "/**/**/test.exe"]
    with self.assertRaises(ValueError):
      self._RunFileFinder(paths, self.stat_action)
  def testInvalidInput(self):
    """`**` may not carry a prefix or suffix within a path component."""
    # Recursion token embedded inside other characters.
    paths = [self.base_path + "/r**z"]
    with self.assertRaises(ValueError):
      self._RunFileFinder(paths, self.stat_action)
    # Suffix directly attached to the recursion token.
    paths = [self.base_path + "/**.exe"]
    with self.assertRaises(ValueError):
      self._RunFileFinder(paths, self.stat_action)
    # Prefix directly attached to the recursion token.
    paths = [self.base_path + "/test**"]
    with self.assertRaises(ValueError):
      self._RunFileFinder(paths, self.stat_action)
  def testGroupings(self):
    """Alternation groups like `{c,d}` expand within a path component."""
    # Group in a directory component.
    paths = [self.base_path + "/a/b/{c,d}/hello*"]
    results = self._RunFileFinder(paths, self.stat_action)
    relative_results = self._GetRelativeResults(results)
    self.assertIn("a/b/c/helloc.txt", relative_results)
    self.assertIn("a/b/d/hellod.txt", relative_results)
    # Group in the file-name component.
    paths = [self.base_path + "/a/b/*/hello{c,d}.txt"]
    results = self._RunFileFinder(paths, self.stat_action)
    relative_results = self._GetRelativeResults(results)
    self.assertIn("a/b/c/helloc.txt", relative_results)
    self.assertIn("a/b/d/hellod.txt", relative_results)
  def testFollowLinks(self):
    """Symlinked dirs are descended into only when follow_links is set."""
    try:
      # This sets up a structure as follows:
      # tmp_dir/lnk_test/contains_lnk
      # tmp_dir/lnk_test/contains_lnk/lnk
      # tmp_dir/lnk_test/lnk_target
      # tmp_dir/lnk_test/lnk_target/target
      # lnk is a symbolic link to lnk_target. A recursive find in
      # contains_lnk will find the target iff follow_links is allowed.
      test_dir = os.path.join(self.temp_dir, "lnk_test")
      contains_lnk = os.path.join(test_dir, "contains_lnk")
      lnk = os.path.join(contains_lnk, "lnk")
      lnk_target = os.path.join(test_dir, "lnk_target")
      lnk_target_contents = os.path.join(lnk_target, "target")
      os.mkdir(test_dir)
      os.mkdir(contains_lnk)
      os.mkdir(lnk_target)
      os.symlink(lnk_target, lnk)
      with open(lnk_target_contents, "wb") as fd:
        fd.write("sometext")
      paths = [contains_lnk + "/**"]
      results = self._RunFileFinder(paths, self.stat_action)
      relative_results = self._GetRelativeResults(results, base_path=test_dir)
      # The link itself is always reported ...
      self.assertIn("contains_lnk/lnk", relative_results)
      self.assertIn("contains_lnk/lnk/target", relative_results)
      results = self._RunFileFinder(paths, self.stat_action, follow_links=False)
      relative_results = self._GetRelativeResults(results, base_path=test_dir)
      self.assertIn("contains_lnk/lnk", relative_results)
      # ... but its contents are hidden when links are not followed.
      self.assertNotIn("contains_lnk/lnk/target", relative_results)
    finally:
      # Best-effort cleanup; test_dir is assigned on the first try line,
      # so it exists whenever any filesystem setup ran.
      try:
        shutil.rmtree(test_dir)
      except OSError:
        pass
def _PrepareTimestampedFiles(self):
searching_path = os.path.join(self.base_path, "searching")
test_dir = os.path.join(self.temp_dir, "times_test")
os.mkdir(test_dir)
for f in ["dpkg.log", "dpkg_false.log", "auth.log"]:
src = os.path.join(searching_path, f)
dst = os.path.join(test_dir, f)
shutil.copy(src, dst)
return test_dir
def RunAndCheck(self,
paths,
action=None,
conditions=None,
expected=None,
unexpected=None,
base_path=None,
**kw):
action = action or self.stat_action
raw_results = self._RunFileFinder(
paths, action, conditions=conditions, **kw)
relative_results = self._GetRelativeResults(
raw_results, base_path=base_path)
for f in unexpected:
self.assertNotIn(f, relative_results)
for f in expected:
self.assertIn(f, relative_results)
  def testLiteralMatchCondition(self):
    """Literal-match condition filters files and reports the match buffer."""
    searching_path = os.path.join(self.base_path, "searching")
    paths = [searching_path + "/{dpkg.log,dpkg_false.log,auth.log}"]
    literal = "pam_unix(ssh:session)"
    clmc = rdf_file_finder.FileFinderContentsLiteralMatchCondition
    bytes_before = 10
    bytes_after = 20
    condition = rdf_file_finder.FileFinderCondition(
        condition_type="CONTENTS_LITERAL_MATCH",
        contents_literal_match=clmc(
            literal=literal, bytes_before=bytes_before,
            bytes_after=bytes_after))
    raw_results = self._RunFileFinder(
        paths, self.stat_action, conditions=[condition])
    relative_results = self._GetRelativeResults(
        raw_results, base_path=searching_path)
    # Only auth.log contains the literal.
    self.assertEqual(len(relative_results), 1)
    self.assertIn("auth.log", relative_results)
    self.assertEqual(len(raw_results[0].matches), 1)
    buffer_ref = raw_results[0].matches[0]
    # NOTE(review): file handle is never closed; benign in a test process.
    orig_data = open(os.path.join(searching_path, "auth.log")).read()
    # The buffer covers the literal plus the requested context bytes and
    # its offset/length point back into the original file.
    self.assertEqual(
        len(buffer_ref.data), bytes_before + len(literal) + bytes_after)
    self.assertEqual(
        orig_data[buffer_ref.offset:buffer_ref.offset + buffer_ref.length],
        buffer_ref.data)
  def testLiteralMatchConditionAllHits(self):
    """ALL_HITS mode reports every occurrence, not just the first."""
    searching_path = os.path.join(self.base_path, "searching")
    paths = [searching_path + "/{dpkg.log,dpkg_false.log,auth.log}"]
    clmc = rdf_file_finder.FileFinderContentsLiteralMatchCondition
    bytes_before = 10
    bytes_after = 20
    literal = "mydomain.com"
    condition = rdf_file_finder.FileFinderCondition(
        condition_type="CONTENTS_LITERAL_MATCH",
        contents_literal_match=clmc(
            literal=literal,
            mode="ALL_HITS",
            bytes_before=bytes_before,
            bytes_after=bytes_after))
    raw_results = self._RunFileFinder(
        paths, self.stat_action, conditions=[condition])
    self.assertEqual(len(raw_results), 1)
    # The fixture contains six occurrences of the literal.
    self.assertEqual(len(raw_results[0].matches), 6)
    # Each returned buffer embeds the literal after bytes_before context.
    for buffer_ref in raw_results[0].matches:
      self.assertEqual(
          buffer_ref.data[bytes_before:bytes_before + len(literal)], literal)
  def testLiteralMatchConditionLargeFile(self):
    """Literal matching works on files larger than a single scan buffer."""
    paths = [os.path.join(self.base_path, "new_places.sqlite")]
    literal = "RecentlyBookmarked"
    clmc = rdf_file_finder.FileFinderContentsLiteralMatchCondition
    bytes_before = 10
    bytes_after = 20
    condition = rdf_file_finder.FileFinderCondition(
        condition_type="CONTENTS_LITERAL_MATCH",
        contents_literal_match=clmc(
            literal=literal,
            mode="ALL_HITS",
            bytes_before=bytes_before,
            bytes_after=bytes_after))
    raw_results = self._RunFileFinder(
        paths, self.stat_action, conditions=[condition])
    self.assertEqual(len(raw_results), 1)
    self.assertEqual(len(raw_results[0].matches), 1)
    buffer_ref = raw_results[0].matches[0]
    # The reported offset/length must address the match in the real file.
    with open(paths[0], "rb") as fd:
      fd.seek(buffer_ref.offset)
      self.assertEqual(buffer_ref.data, fd.read(buffer_ref.length))
    self.assertEqual(
        buffer_ref.data[bytes_before:bytes_before + len(literal)], literal)
  def testRegexMatchCondition(self):
    """Regex-match condition filters files like the literal variant."""
    searching_path = os.path.join(self.base_path, "searching")
    paths = [searching_path + "/{dpkg.log,dpkg_false.log,auth.log}"]
    regex = r"pa[nm]_o?unix\(s{2}h"
    bytes_before = 10
    bytes_after = 20
    crmc = rdf_file_finder.FileFinderContentsRegexMatchCondition
    condition = rdf_file_finder.FileFinderCondition(
        condition_type="CONTENTS_REGEX_MATCH",
        contents_regex_match=crmc(
            regex=regex,
            bytes_before=bytes_before,
            bytes_after=bytes_after,
        ))
    raw_results = self._RunFileFinder(
        paths, self.stat_action, conditions=[condition])
    relative_results = self._GetRelativeResults(
        raw_results, base_path=searching_path)
    # Only auth.log matches the pattern.
    self.assertEqual(len(relative_results), 1)
    self.assertIn("auth.log", relative_results)
    self.assertEqual(len(raw_results[0].matches), 1)
    buffer_ref = raw_results[0].matches[0]
    # NOTE(review): file handle is never closed; benign in a test process.
    orig_data = open(os.path.join(searching_path, "auth.log")).read()
    # The buffer's offset/length must point back into the original file.
    self.assertEqual(
        orig_data[buffer_ref.offset:buffer_ref.offset + buffer_ref.length],
        buffer_ref.data)
  def testRegexMatchConditionAllHits(self):
    """ALL_HITS regex matching reports every occurrence."""
    searching_path = os.path.join(self.base_path, "searching")
    paths = [searching_path + "/{dpkg.log,dpkg_false.log,auth.log}"]
    bytes_before = 10
    bytes_after = 20
    crmc = rdf_file_finder.FileFinderContentsRegexMatchCondition
    regex = r"mydo....\.com"
    condition = rdf_file_finder.FileFinderCondition(
        condition_type="CONTENTS_REGEX_MATCH",
        contents_regex_match=crmc(
            regex=regex,
            mode="ALL_HITS",
            bytes_before=bytes_before,
            bytes_after=bytes_after,
        ))
    raw_results = self._RunFileFinder(
        paths, self.stat_action, conditions=[condition])
    self.assertEqual(len(raw_results), 1)
    # The fixture contains six strings matching the pattern.
    self.assertEqual(len(raw_results[0].matches), 6)
    for buffer_ref in raw_results[0].matches:
      # Every match of this particular pattern is literally mydomain.com.
      needle = "mydomain.com"
      self.assertEqual(buffer_ref.data[bytes_before:bytes_before + len(needle)],
                       needle)
def testHashAction(self):
paths = [os.path.join(self.base_path, "hello.exe")]
hash_action = rdf_file_finder.FileFinderAction(
action_type=rdf_file_finder.FileFinderAction.Action.HASH)
results = self._RunFileFinder(paths, hash_action)
self.assertEqual(len(results), 1)
res = results[0]
data = open(paths[0], "rb").read()
self.assertEqual(res.hash_entry.num_bytes, len(data))
self.assertEqual(res.hash_entry.md5.HexDigest(),
hashlib.md5(data).hexdigest())
self.assertEqual(res.hash_entry.sha1.HexDigest(),
hashlib.sha1(data).hexdigest())
self.assertEqual(res.hash_entry.sha256.HexDigest(),
hashlib.sha256(data).hexdigest())
hash_action = rdf_file_finder.FileFinderAction(
action_type=rdf_file_finder.FileFinderAction.Action.HASH,
hash=rdf_file_finder.FileFinderHashActionOptions(
max_size=100, oversized_file_policy="SKIP"))
results = self._RunFileFinder(paths, hash_action)
self.assertEqual(len(results), 1)
res = results[0]
self.assertFalse(res.HasField("hash"))
hash_action = rdf_file_finder.FileFinderAction(
action_type=rdf_file_finder.FileFinderAction.Action.HASH,
hash=rdf_file_finder.FileFinderHashActionOptions(
max_size=100, oversized_file_policy="HASH_TRUNCATED"))
results = self._RunFileFinder(paths, hash_action)
self.assertEqual(len(results), 1)
res = results[0]
data = open(paths[0], "rb").read()[:100]
self.assertEqual(res.hash_entry.num_bytes, len(data))
self.assertEqual(res.hash_entry.md5.HexDigest(),
hashlib.md5(data).hexdigest())
self.assertEqual(res.hash_entry.sha1.HexDigest(),
hashlib.sha1(data).hexdigest())
self.assertEqual(res.hash_entry.sha256.HexDigest(),
hashlib.sha256(data).hexdigest())
  def _RunFileFinderDownloadHello(self, upload, opts=None):
    """Runs a DOWNLOAD action on hello.exe with a stubbed upload.

    Args:
      upload: Mock standing in for GRRClientWorker.UploadFile.
      opts: Optional FileFinderDownloadActionOptions for the action.

    Returns:
      The FileFinder results for hello.exe.
    """
    action = rdf_file_finder.FileFinderAction.Download()
    action.download = opts
    # The stub reports a fixed 42-byte upload regardless of the file.
    upload.return_value = rdf_client.UploadedFile(
        bytes_uploaded=42, file_id="foo", hash=rdf_crypto.Hash())
    hello_path = os.path.join(self.base_path, "hello.exe")
    return self._RunFileFinder([hello_path], action)
@mock.patch.object(comms.GRRClientWorker, "UploadFile")
def testDownloadActionDefault(self, upload):
results = self._RunFileFinderDownloadHello(upload)
self.assertEquals(len(results), 1)
self.assertTrue(upload.called_with(max_bytes=None))
self.assertTrue(results[0].HasField("uploaded_file"))
self.assertEquals(results[0].uploaded_file, upload.return_value)
@mock.patch.object(comms.GRRClientWorker, "UploadFile")
def testDownloadActionSkip(self, upload):
opts = rdf_file_finder.FileFinderDownloadActionOptions(
max_size=0, oversized_file_policy="SKIP")
results = self._RunFileFinderDownloadHello(upload, opts=opts)
self.assertEquals(len(results), 1)
self.assertFalse(upload.called)
self.assertFalse(results[0].HasField("uploaded_file"))
@mock.patch.object(comms.GRRClientWorker, "UploadFile")
def testDownloadActionTruncate(self, upload):
opts = rdf_file_finder.FileFinderDownloadActionOptions(
max_size=42, oversized_file_policy="DOWNLOAD_TRUNCATED")
results = self._RunFileFinderDownloadHello(upload, opts=opts)
self.assertEquals(len(results), 1)
self.assertTrue(upload.called_with(max_bytes=42))
self.assertTrue(results[0].HasField("uploaded_file"))
self.assertEquals(results[0].uploaded_file, upload.return_value)
EXT2_COMPR_FL = 0x00000004
EXT2_IMMUTABLE_FL = 0x00000010
# TODO(hanuszczak): Maybe it would make sense to refactor this to a helper
# constructor of the `rdf_file_finder.FileFinderAction`.
@staticmethod
def _StatAction(**kwargs):
action_type = rdf_file_finder.FileFinderAction.Action.STAT
opts = rdf_file_finder.FileFinderStatActionOptions(**kwargs)
return rdf_file_finder.FileFinderAction(action_type=action_type, stat=opts)
  @unittest.skipIf(platform.system() != "Linux", "requires Linux")
  def testStatExtFlags(self):
    """STAT reports ext2 flags set via chattr on Linux."""
    with test_lib.AutoTempFilePath() as temp_filepath:
      # Skip (rather than fail) when the tooling or filesystem cannot
      # set the attribute.
      if subprocess.call(["which", "chattr"]) != 0:
        raise unittest.SkipTest("`chattr` command is not available")
      if subprocess.call(["chattr", "+c", temp_filepath]) != 0:
        reason = "extended attributes not supported by filesystem"
        raise unittest.SkipTest(reason)
      action = self._StatAction()
      results = self._RunFileFinder([temp_filepath], action)
      self.assertEqual(len(results), 1)
      stat_entry = results[0].stat_entry
      # Only the compression flag was set; immutable must remain clear.
      self.assertTrue(stat_entry.st_flags_linux & self.EXT2_COMPR_FL)
      self.assertFalse(stat_entry.st_flags_linux & self.EXT2_IMMUTABLE_FL)
  def testStatExtAttrs(self):
    """STAT collects xattrs, and omits them when ext_attrs is disabled."""
    with test_lib.AutoTempFilePath() as temp_filepath:
      self._SetExtAttr(temp_filepath, "user.foo", "bar")
      self._SetExtAttr(temp_filepath, "user.quux", "norf")
      action = self._StatAction()
      results = self._RunFileFinder([temp_filepath], action)
      self.assertEqual(len(results), 1)
      ext_attrs = results[0].stat_entry.ext_attrs
      self.assertEqual(ext_attrs[0].name, "user.foo")
      self.assertEqual(ext_attrs[0].value, "bar")
      self.assertEqual(ext_attrs[1].name, "user.quux")
      self.assertEqual(ext_attrs[1].value, "norf")
      # With ext_attrs disabled the same file yields no attributes.
      action = self._StatAction(ext_attrs=False)
      results = self._RunFileFinder([temp_filepath], action)
      self.assertEqual(len(results), 1)
      ext_attrs = results[0].stat_entry.ext_attrs
      self.assertFalse(ext_attrs)
@classmethod
def _SetExtAttr(cls, filepath, name, value):
if platform.system() == "Linux":
cls._SetExtAttrLinux(filepath, name, value)
elif platform.system() == "Darwin":
cls._SetExtAttrOsx(filepath, name, value)
else:
raise unittest.SkipTest("unsupported system")
  @classmethod
  def _SetExtAttrLinux(cls, filepath, name, value):
    """Sets an xattr with `setfattr`, skipping the test when unsupported."""
    if subprocess.call(["which", "setfattr"]) != 0:
      raise unittest.SkipTest("`setfattr` command is not available")
    # A non-zero exit usually means the filesystem (e.g. tmpfs) rejects
    # user xattrs -- skip rather than fail.
    if subprocess.call(["setfattr", filepath, "-n", name, "-v", value]) != 0:
      raise unittest.SkipTest("extended attributes not supported by filesystem")
  @classmethod
  def _SetExtAttrOsx(cls, filepath, name, value):
    """Sets an xattr with the macOS `xattr` tool, skipping on failure."""
    if subprocess.call(["xattr", "-w", name, value, filepath]) != 0:
      raise unittest.SkipTest("extended attributes not supported")
  def testLinkStat(self):
    """Tests resolving symlinks when getting stat entries."""
    test_dir = os.path.join(self.temp_dir, "lnk_stat_test")
    lnk = os.path.join(test_dir, "lnk")
    lnk_target = os.path.join(test_dir, "lnk_target")
    os.mkdir(test_dir)
    with open(lnk_target, "wb") as fd:
      fd.write("sometext")
    os.symlink(lnk_target, lnk)
    paths = [lnk]
    # lstat sizes the link itself, stat the file it points to.
    link_size = os.lstat(lnk).st_size
    target_size = os.stat(lnk).st_size
    for expected_size, resolve_links in [(link_size, False), (target_size,
                                                              True)]:
      stat_action = rdf_file_finder.FileFinderAction.Stat(
          resolve_links=resolve_links)
      results = self._RunFileFinder(paths, stat_action)
      self.assertEqual(len(results), 1)
      res = results[0]
      self.assertEqual(res.stat_entry.st_size, expected_size)
def testModificationTimeCondition(self):
with utils.Stubber(os, "lstat", MyStat):
test_dir = self._PrepareTimestampedFiles()
# We have one "old" file, auth.log, and two "new" ones, dpkg*.
paths = [test_dir + "/{dpkg.log,dpkg_false.log,auth.log}"]
change_time = rdfvalue.RDFDatetime.FromHumanReadable("2020-01-01")
modification_time_condition = rdf_file_finder.FileFinderCondition(
condition_type="MODIFICATION_TIME",
modification_time=rdf_file_finder.FileFinderModificationTimeCondition(
max_last_modified_time=change_time))
| |
# coding: utf-8
"""
Speech Services API v2.0
Speech Services API v2.0. # noqa: E501
OpenAPI spec version: v2.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.model import Model # noqa: F401,E501
class Endpoint(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'concurrent_recognitions': 'int',
'id': 'str',
'endpoint_kind': 'str',
'endpoint_urls': 'dict(str, str)',
'created_date_time': 'datetime',
'last_action_date_time': 'datetime',
'status': 'str',
'models': 'list[Model]',
'content_logging_enabled': 'bool',
'name': 'str',
'description': 'str',
'properties': 'dict(str, str)',
'locale': 'str'
}
attribute_map = {
'concurrent_recognitions': 'concurrentRecognitions',
'id': 'id',
'endpoint_kind': 'endpointKind',
'endpoint_urls': 'endpointUrls',
'created_date_time': 'createdDateTime',
'last_action_date_time': 'lastActionDateTime',
'status': 'status',
'models': 'models',
'content_logging_enabled': 'contentLoggingEnabled',
'name': 'name',
'description': 'description',
'properties': 'properties',
'locale': 'locale'
}
    def __init__(self, concurrent_recognitions=None, id=None, endpoint_kind=None, endpoint_urls=None, created_date_time=None, last_action_date_time=None, status=None, models=None, content_logging_enabled=None, name=None, description=None, properties=None, locale=None): # noqa: E501
        """Endpoint - a model defined in Swagger""" # noqa: E501
        # Private backing fields for the generated properties below.
        self._concurrent_recognitions = None
        self._id = None
        self._endpoint_kind = None
        self._endpoint_urls = None
        self._created_date_time = None
        self._last_action_date_time = None
        self._status = None
        self._models = None
        self._content_logging_enabled = None
        self._name = None
        self._description = None
        self._properties = None
        self._locale = None
        self.discriminator = None
        # Optional fields are routed through their setters only when a
        # value was supplied.
        if concurrent_recognitions is not None:
            self.concurrent_recognitions = concurrent_recognitions
        # Required fields are always assigned; their setters raise
        # ValueError when given None.
        self.id = id
        self.endpoint_kind = endpoint_kind
        self.endpoint_urls = endpoint_urls
        self.created_date_time = created_date_time
        self.last_action_date_time = last_action_date_time
        self.status = status
        self.models = models
        if content_logging_enabled is not None:
            self.content_logging_enabled = content_logging_enabled
        self.name = name
        if description is not None:
            self.description = description
        if properties is not None:
            self.properties = properties
        if locale is not None:
            self.locale = locale
    @property
    def concurrent_recognitions(self):
        """Gets the concurrent_recognitions of this Endpoint. # noqa: E501
        The number of concurrent recognitions the endpoint supports # noqa: E501
        :return: The concurrent_recognitions of this Endpoint. # noqa: E501
        :rtype: int
        """
        return self._concurrent_recognitions
    @concurrent_recognitions.setter
    def concurrent_recognitions(self, concurrent_recognitions):
        """Sets the concurrent_recognitions of this Endpoint.
        The number of concurrent recognitions the endpoint supports # noqa: E501
        :param concurrent_recognitions: The concurrent_recognitions of this Endpoint. # noqa: E501
        :type: int
        """
        # Optional field: any value (including None) is accepted unchecked.
        self._concurrent_recognitions = concurrent_recognitions
    @property
    def id(self):
        """Gets the id of this Endpoint. # noqa: E501
        The identifier of this entity # noqa: E501
        :return: The id of this Endpoint. # noqa: E501
        :rtype: str
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this Endpoint.
        The identifier of this entity # noqa: E501
        :param id: The id of this Endpoint. # noqa: E501
        :type: str
        """
        # Required field: the generated model rejects a missing identifier.
        if id is None:
            raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
        self._id = id
    @property
    def endpoint_kind(self):
        """Gets the endpoint_kind of this Endpoint. # noqa: E501
        The kind of this endpoint (e.g. custom speech, custom voice ...) # noqa: E501
        :return: The endpoint_kind of this Endpoint. # noqa: E501
        :rtype: str
        """
        return self._endpoint_kind
    @endpoint_kind.setter
    def endpoint_kind(self, endpoint_kind):
        """Sets the endpoint_kind of this Endpoint.
        The kind of this endpoint (e.g. custom speech, custom voice ...) # noqa: E501
        :param endpoint_kind: The endpoint_kind of this Endpoint. # noqa: E501
        :type: str
        """
        # Required enum field: reject None, then validate against the
        # closed set of kinds from the API specification.
        if endpoint_kind is None:
            raise ValueError("Invalid value for `endpoint_kind`, must not be `None`") # noqa: E501
        allowed_values = ["None", "SpeechRecognition", "CustomVoice", "LanguageGeneration", "LanguageIdentification"] # noqa: E501
        if endpoint_kind not in allowed_values:
            raise ValueError(
                "Invalid value for `endpoint_kind` ({0}), must be one of {1}" # noqa: E501
                .format(endpoint_kind, allowed_values)
            )
        self._endpoint_kind = endpoint_kind
    @property
    def endpoint_urls(self):
        """Gets the endpoint_urls of this Endpoint. # noqa: E501
        The list of endpoint urls # noqa: E501
        :return: The endpoint_urls of this Endpoint. # noqa: E501
        :rtype: dict(str, str)
        """
        return self._endpoint_urls
    @endpoint_urls.setter
    def endpoint_urls(self, endpoint_urls):
        """Sets the endpoint_urls of this Endpoint.
        The list of endpoint urls # noqa: E501
        :param endpoint_urls: The endpoint_urls of this Endpoint. # noqa: E501
        :type: dict(str, str)
        """
        # Required field.
        if endpoint_urls is None:
            raise ValueError("Invalid value for `endpoint_urls`, must not be `None`") # noqa: E501
        self._endpoint_urls = endpoint_urls
    @property
    def created_date_time(self):
        """Gets the created_date_time of this Endpoint. # noqa: E501
        The time-stamp when the object was created # noqa: E501
        :return: The created_date_time of this Endpoint. # noqa: E501
        :rtype: datetime
        """
        return self._created_date_time
    @created_date_time.setter
    def created_date_time(self, created_date_time):
        """Sets the created_date_time of this Endpoint.
        The time-stamp when the object was created # noqa: E501
        :param created_date_time: The created_date_time of this Endpoint. # noqa: E501
        :type: datetime
        """
        # Required timestamp field.
        if created_date_time is None:
            raise ValueError("Invalid value for `created_date_time`, must not be `None`") # noqa: E501
        self._created_date_time = created_date_time
    @property
    def last_action_date_time(self):
        """Gets the last_action_date_time of this Endpoint. # noqa: E501
        The time-stamp when the current status was entered # noqa: E501
        :return: The last_action_date_time of this Endpoint. # noqa: E501
        :rtype: datetime
        """
        return self._last_action_date_time
    @last_action_date_time.setter
    def last_action_date_time(self, last_action_date_time):
        """Sets the last_action_date_time of this Endpoint.
        The time-stamp when the current status was entered # noqa: E501
        :param last_action_date_time: The last_action_date_time of this Endpoint. # noqa: E501
        :type: datetime
        """
        # Required timestamp field.
        if last_action_date_time is None:
            raise ValueError("Invalid value for `last_action_date_time`, must not be `None`") # noqa: E501
        self._last_action_date_time = last_action_date_time
    @property
    def status(self):
        """Gets the status of this Endpoint. # noqa: E501
        The status of the object # noqa: E501
        :return: The status of this Endpoint. # noqa: E501
        :rtype: str
        """
        return self._status
    @status.setter
    def status(self, status):
        """Sets the status of this Endpoint.
        The status of the object # noqa: E501
        :param status: The status of this Endpoint. # noqa: E501
        :type: str
        """
        # Required enum field: reject None, then validate against the
        # closed set of states from the API specification.
        if status is None:
            raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
        allowed_values = ["Succeeded", "Failed", "Running", "NotStarted"] # noqa: E501
        if status not in allowed_values:
            raise ValueError(
                "Invalid value for `status` ({0}), must be one of {1}" # noqa: E501
                .format(status, allowed_values)
            )
        self._status = status
    @property
    def models(self):
        """Gets the models of this Endpoint. # noqa: E501
        Information about the deployed models # noqa: E501
        :return: The models of this Endpoint. # noqa: E501
        :rtype: list[Model]
        """
        return self._models
    @models.setter
    def models(self, models):
        """Sets the models of this Endpoint.
        Information about the deployed models # noqa: E501
        :param models: The models of this Endpoint. # noqa: E501
        :type: list[Model]
        """
        # Required field.
        if models is None:
            raise ValueError("Invalid value for `models`, must not be `None`") # noqa: E501
        self._models = models
    @property
    def content_logging_enabled(self):
        """Gets the content_logging_enabled of this Endpoint. # noqa: E501
        A value indicating whether content logging (audio & transcriptions) is being used for a deployment. Suppressing content logging will result in a higher cost for the deployment. Free subscriptions can only deploy true # noqa: E501
        :return: The content_logging_enabled of this Endpoint. # noqa: E501
        :rtype: bool
        """
        return self._content_logging_enabled
    @content_logging_enabled.setter
    def content_logging_enabled(self, content_logging_enabled):
        """Sets the content_logging_enabled of this Endpoint.
        A value indicating whether content logging (audio & transcriptions) is being used for a deployment. Suppressing content logging will result in a higher cost for the deployment. Free subscriptions can only deploy true # noqa: E501
        :param content_logging_enabled: The content_logging_enabled of this Endpoint. # noqa: E501
        :type: bool
        """
        # Optional flag: assigned without validation.
        self._content_logging_enabled = content_logging_enabled
    @property
    def name(self):
        """Gets the name of this Endpoint. # noqa: E501
        The name of the object # noqa: E501
        :return: The name of this Endpoint. # noqa: E501
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this Endpoint.
        The name of the object # noqa: E501
        :param name: The name of this Endpoint. # noqa: E501
        :type: str
        """
        # Required field.
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
        self._name = name
    @property
    def description(self):
        """Gets the description of this Endpoint. # noqa: E501
        The description of the object # noqa: E501
        :return: The description of this Endpoint. # noqa: E501
        :rtype: str
        """
        # Optional field: may be None when it was never set.
        return self._description
@description.setter
def description(self, description):
"""Sets | |
# Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import datetime
from .normalizing import normalize
from .misc import plural_or_not
def _get_timetuple(epoch_secs=None):
    """Returns (year, month, day, hour, min, sec, millis) in local time.

    Defaults to the current time when `epoch_secs` is not given.
    """
    if epoch_secs is None:  # an explicit 0 is valid (used in unit tests)
        epoch_secs = time.time()
    whole_secs, millis = _float_secs_to_secs_and_millis(epoch_secs)
    year_to_secs = time.localtime(whole_secs)[:6]
    return year_to_secs + (millis,)
def _float_secs_to_secs_and_millis(secs):
isecs = int(secs)
millis = int(round((secs - isecs) * 1000))
return (isecs, millis) if millis < 1000 else (isecs+1, 0)
START_TIME = _get_timetuple()
def timestr_to_secs(timestr):
    """Parses time in format like '1h 10s' and returns time in seconds (float).

    Given time must be in format '1d 2h 3m 4s 5ms' with following rules:

    - Time parts having zero value can be ignored (e.g. '3m 4s' is ok)
    - Format is case and space insensitive
    - Instead of 'd' it is also possible to use 'day' or 'days'
    - Instead of 'h' also 'hour' and 'hours' are ok
    - Instead of 'm' also 'minute', 'minutes', 'min' and 'mins' are ok
    - Instead of 's' also 'second', 'seconds', 'sec' and 'secs' are ok
    - Instead of 'ms' also 'millisecond', 'milliseconds' and 'millis' are ok
    - It is possible to give time only as a float and then it is considered
      to be seconds (e.g. '123', '123.0', '123s', '2min 3s' are all equivalent)

    Raises ValueError with a uniform message for any unparseable input.
    """
    try:
        secs = _timestr_to_secs(timestr)
    except (ValueError, TypeError):
        raise ValueError("Invalid time string '%s'" % timestr)
    # Round to millisecond precision; finer precision is not meaningful here.
    return round(secs, 3)
def _timestr_to_secs(timestr):
    """Convert a (possibly signed) time string to seconds.

    Expects the aliases to have been collapsed by `_normalize_timestr`, so
    'x' denotes milliseconds and 'p' denotes days. Raises ValueError when the
    string cannot be interpreted.
    """
    timestr = _normalize_timestr(timestr)
    if timestr == '':
        raise ValueError
    # Plain numbers are interpreted directly as seconds.
    try:
        return float(timestr)
    except ValueError:
        pass
    sign = 1
    if timestr[0] == '-':
        sign = -1
        timestr = timestr[1:]
    # A later duplicate unit overrides an earlier one (assignment semantics,
    # matching the original implementation).
    values = {'x': 0, 's': 0, 'm': 0, 'h': 0, 'p': 0}
    digits = []
    for char in timestr:
        if char in values:
            values[char] = float(''.join(digits))
            digits = []
        else:
            digits.append(char)
    if digits:
        raise ValueError
    return sign * (values['x'] / 1000 + values['s'] + values['m'] * 60
                   + values['h'] * 60 * 60 + values['p'] * 60 * 60 * 24)
def _normalize_timestr(timestr):
if isinstance(timestr, (int, long, float)):
return timestr
timestr = normalize(timestr)
for item in 'milliseconds', 'millisecond', 'millis':
timestr = timestr.replace(item, 'ms')
for item in 'seconds', 'second', 'secs', 'sec':
timestr = timestr.replace(item, 's')
for item in 'minutes', 'minute', 'mins', 'min':
timestr = timestr.replace(item, 'm')
for item in 'hours', 'hour':
timestr = timestr.replace(item, 'h')
for item in 'days', 'day':
timestr = timestr.replace(item, 'd')
# 1) 'ms' -> 'x' to ease processing later
# 2) 'd' -> 'p' because float('1d') returns 1.0 in Jython (bug submitted)
return timestr.replace('ms','x').replace('d','p')
def secs_to_timestr(secs, compact=False):
    """Converts time in seconds to a string representation.

    Returned string is in format like
    '1 day 2 hours 3 minutes 4 seconds 5 milliseconds' with following rules:

    - Time parts having zero value are not included (e.g. '3 minutes 4 seconds'
      instead of '0 days 0 hours 3 minutes 4 seconds')
    - Hour part has a maximum of 23 and minutes and seconds both have 59
      (e.g. '1 minute 40 seconds' instead of '100 seconds')

    If compact has value 'True', short suffixes are used
    (e.g. 1d 2h 3min 4s 5ms).
    """
    return _SecsToTimestrHelper(secs, compact).get_value()
class _SecsToTimestrHelper:
    """Builds the human-readable time string used by `secs_to_timestr`."""

    def __init__(self, float_secs, compact):
        self._compact = compact
        self._ret = []
        (self._sign, millis, secs, mins,
         hours, days) = self._secs_to_components(float_secs)
        # Largest unit first; zero-valued parts are skipped by _add_item.
        for value, short, word in ((days, 'd', 'day'),
                                   (hours, 'h', 'hour'),
                                   (mins, 'min', 'minute'),
                                   (secs, 's', 'second'),
                                   (millis, 'ms', 'millisecond')):
            self._add_item(value, short, word)

    def get_value(self):
        """Return the assembled string, or a zero marker if nothing was added."""
        if not self._ret:
            return '0s' if self._compact else '0 seconds'
        return self._sign + ' '.join(self._ret)

    def _add_item(self, value, compact_suffix, long_suffix):
        # Parts with zero value never appear in the output.
        if value == 0:
            return
        if self._compact:
            suffix = compact_suffix
        else:
            suffix = ' %s%s' % (long_suffix, plural_or_not(value))
        self._ret.append('%d%s' % (value, suffix))

    def _secs_to_components(self, float_secs):
        """Split float seconds into sign prefix and integer time components."""
        sign = ''
        if float_secs < 0:
            sign = '- '
            float_secs = abs(float_secs)
        int_secs, millis = _float_secs_to_secs_and_millis(float_secs)
        mins, secs = divmod(int_secs, 60)
        hours, mins = divmod(mins, 60)
        days, hours = divmod(hours, 24)
        return sign, millis, secs, mins, hours, days
def format_time(timetuple_or_epochsecs, daysep='', daytimesep=' ', timesep=':',
                millissep=None, gmtsep=None):
    """Returns a timestamp formatted from given time using separators.

    Time can be given either as a timetuple or seconds after epoch.

    Timetuple is (year, month, day, hour, min, sec[, millis]), where parts must
    be integers and millis is required only when millissep is not None.
    Notice that this is not 100% compatible with standard Python timetuples
    which do not have millis.

    Seconds after epoch can be either an integer or a float.
    """
    # NOTE(review): `long` exists only on Python 2; this module predates Py3.
    if isinstance(timetuple_or_epochsecs, (int, long, float)):
        timetuple = _get_timetuple(timetuple_or_epochsecs)
    else:
        timetuple = timetuple_or_epochsecs
    fields = ['%02d' % part for part in timetuple[:6]]
    day = daysep.join(fields[:3])
    time_ = timesep.join(fields[3:6])
    if millissep:
        millis = '%s%03d' % (millissep, timetuple[6])
    else:
        millis = ''
    return day + daytimesep + time_ + millis + _diff_to_gmt(gmtsep)
def _diff_to_gmt(sep):
if not sep:
return ''
if time.altzone == 0:
sign = ''
elif time.altzone > 0:
sign = '-'
else:
sign = '+'
minutes = abs(time.altzone) / 60.0
hours, minutes = divmod(minutes, 60)
return '%sGMT%s%s%02d:%02d' % (sep, sep, sign, hours, minutes)
def get_time(format='timestamp', time_=None):
    """Return the given or current time in requested format.

    If time is not given, current time is used. How time is returned is
    determined based on the given 'format' string as follows. Note that all
    checks are case insensitive.

    - If 'format' contains word 'epoch' the time is returned in seconds after
      the unix epoch.
    - If 'format' contains any of the words 'year', 'month', 'day', 'hour',
      'min' or 'sec' only selected parts are returned. The order of the returned
      parts is always the one in previous sentence and order of words in
      'format' is not significant. Parts are returned as zero padded strings
      (e.g. May -> '05').
    - Otherwise (and by default) the time is returned as a timestamp string in
      format '2006-02-24 15:08:31'.
    """
    # Fix: the previous `time_ or time.time()` treated epoch 0 as "not given"
    # and silently substituted the current time; 0 is a valid epoch value.
    if time_ is None:
        time_ = time.time()
    time_ = int(time_)
    format = format.lower()
    # 1) Return time in seconds since epoch
    if 'epoch' in format:
        return time_
    timetuple = time.localtime(time_)
    parts = ['%.2d' % timetuple[i]
             for i, match in enumerate('year month day hour min sec'.split())
             if match in format]
    # 2) Return time as timestamp
    if not parts:
        return format_time(timetuple, daysep='-')
    # 3) Return requested parts of the time: a string for a single part,
    # a list when several parts were requested.
    if len(parts) == 1:
        return parts[0]
    return parts
def parse_time(timestr):
    """Parses the time string and returns its value as seconds since epoch.

    Time can be given in five different formats:

    1) Numbers are interpreted as time since epoch directly. It is possible to
       use also ints and floats, not only strings containing numbers.
    2) Valid timestamp ('YYYY-MM-DD hh:mm:ss' and 'YYYYMMDD hhmmss').
    3) 'NOW' (case-insensitive) is the current local time.
    4) 'UTC' (case-insensitive) is the current time in UTC.
    5) Format 'NOW - 1 day' or 'UTC + 1 hour 30 min' is the current local/UTC
       time plus/minus the time specified with the time string.

    Seconds are rounded down to avoid getting times in the future.
    """
    parsers = (_parse_time_epoch,
               _parse_time_timestamp,
               _parse_time_now_and_utc)
    for parse in parsers:
        seconds = parse(timestr)
        if seconds is not None:
            # int() floors non-negative values, so we never round into the future.
            return int(seconds)
    raise ValueError("Invalid time format '%s'" % timestr)
def _parse_time_epoch(timestr):
try:
ret = float(timestr)
except ValueError:
return None
if ret < 0:
raise ValueError("Epoch time must be positive (got %s)" % timestr)
return ret
def _parse_time_timestamp(timestr):
    """Interpret *timestr* as a timestamp; return epoch seconds or None."""
    separators = (' ', ':', '-', '.')
    try:
        return timestamp_to_secs(timestr, separators)
    except ValueError:
        # Not a timestamp; let the next parser have a go.
        return None
def _parse_time_now_and_utc(timestr):
timestr = timestr.replace(' ', '').lower()
base = | |
LETTER TCHE': None,
'CYRILLIC CAPITAL LETTER TE WITH MIDDLE HOOK': None,
'CYRILLIC CAPITAL LETTER TSSE': None,
'CYRILLIC CAPITAL LETTER TSWE': None,
'CYRILLIC CAPITAL LETTER TWE': None,
'CYRILLIC CAPITAL LETTER WE': None,
'CYRILLIC CAPITAL LETTER YAE': None,
'CYRILLIC CAPITAL LETTER YERU WITH BACK YER': None,
'CYRILLIC CAPITAL LETTER YN': None,
'CYRILLIC CAPITAL LETTER ZEMLYA': None,
'CYRILLIC CAPITAL LETTER ZHWE': None,
'CYRILLIC KAVYKA': None,
'CYRILLIC LETTER MULTIOCULAR O': None,
'CYRILLIC LETTER SMALL CAPITAL EL': None,
'CYRILLIC PAYEROK': None,
'CYRILLIC SMALL LETTER ALEUT KA': None,
'CYRILLIC SMALL LETTER BINOCULAR O': None,
'CYRILLIC SMALL LETTER BLENDED YUS': None,
'CYRILLIC SMALL LETTER BROAD OMEGA': None,
'CYRILLIC SMALL LETTER CCHE': None,
'CYRILLIC SMALL LETTER CLOSED LITTLE YUS': None,
'CYRILLIC SMALL LETTER DJERV': None,
'CYRILLIC SMALL LETTER DOUBLE MONOCULAR O': None,
'CYRILLIC SMALL LETTER DWE': None,
'CYRILLIC SMALL LETTER DZELO': None,
'CYRILLIC SMALL LETTER DZWE': None,
'CYRILLIC SMALL LETTER DZZE': None,
'CYRILLIC SMALL LETTER EL WITH HOOK': None,
'CYRILLIC SMALL LETTER EL WITH MIDDLE HOOK': None,
'CYRILLIC SMALL LETTER EN WITH MIDDLE HOOK': None,
'CYRILLIC SMALL LETTER GHE WITH DESCENDER': None,
'CYRILLIC SMALL LETTER GHE WITH STROKE AND HOOK': None,
'CYRILLIC SMALL LETTER HA WITH HOOK': None,
'CYRILLIC SMALL LETTER HA WITH STROKE': None,
'CYRILLIC SMALL LETTER HWE': None,
'CYRILLIC SMALL LETTER IOTA': None,
'CYRILLIC SMALL LETTER IOTIFIED A': None,
'CYRILLIC SMALL LETTER IOTIFIED CLOSED LITTLE YUS': None,
'CYRILLIC SMALL LETTER IOTIFIED YAT': None,
'CYRILLIC SMALL LETTER LHA': None,
'CYRILLIC SMALL LETTER MONOCULAR O': None,
'CYRILLIC SMALL LETTER MONOGRAPH UK': None,
'CYRILLIC SMALL LETTER NEUTRAL YER': None,
'CYRILLIC SMALL LETTER PALOCHKA': None,
'CYRILLIC SMALL LETTER PE WITH DESCENDER': None,
'CYRILLIC SMALL LETTER QA': None,
'CYRILLIC SMALL LETTER REVERSED DZE': None,
'CYRILLIC SMALL LETTER REVERSED YU': None,
'CYRILLIC SMALL LETTER REVERSED ZE': None,
'CYRILLIC SMALL LETTER RHA': None,
'CYRILLIC SMALL LETTER SHWE': None,
'CYRILLIC SMALL LETTER SOFT DE': None,
'CYRILLIC SMALL LETTER SOFT EL': None,
'CYRILLIC SMALL LETTER SOFT EM': None,
'CYRILLIC SMALL LETTER TCHE': None,
'CYRILLIC SMALL LETTER TE WITH MIDDLE HOOK': None,
'CYRILLIC SMALL LETTER TSSE': None,
'CYRILLIC SMALL LETTER TSWE': None,
'CYRILLIC SMALL LETTER TWE': None,
'CYRILLIC SMALL LETTER WE': None,
'CYRILLIC SMALL LETTER YAE': None,
'CYRILLIC SMALL LETTER YERU WITH BACK YER': None,
'CYRILLIC SMALL LETTER YN': None,
'CYRILLIC SMALL LETTER ZEMLYA': None,
'CYRILLIC SMALL LETTER ZHWE': None,
'DECIMAL EXPONENT SYMBOL': None,
'DESERET CAPITAL LETTER EW': None,
'DESERET CAPITAL LETTER OI': None,
'DESERET SMALL LETTER EW': None,
'DESERET SMALL LETTER OI': None,
'DEVANAGARI CARET': None,
'DEVANAGARI GAP FILLER': None,
'DEVANAGARI HEADSTROKE': None,
'DEVANAGARI LETTER BBA': None,
'DEVANAGARI LETTER CANDRA A': None,
'DEVANAGARI LETTER DDDA': None,
'DEVANAGARI LETTER GGA': None,
'DEVANAGARI LETTER GLOTTAL STOP': None,
'DEVANAGARI LETTER HEAVY YA': None,
'DEVANAGARI LETTER JJA': None,
'DEVANAGARI LETTER SHORT A': None,
'DEVANAGARI LETTER ZHA': None,
'DEVANAGARI SIGN CANDRABINDU AVAGRAHA': None,
'DEVANAGARI SIGN CANDRABINDU THREE': None,
'DEVANAGARI SIGN CANDRABINDU TWO': None,
'DEVANAGARI SIGN CANDRABINDU VIRAMA': None,
'DEVANAGARI SIGN DOUBLE CANDRABINDU VIRAMA': None,
'DEVANAGARI SIGN HIGH SPACING DOT': None,
'DEVANAGARI SIGN INVERTED CANDRABINDU': None,
'DEVANAGARI SIGN PUSHPIKA': None,
'DEVANAGARI SIGN SPACING CANDRABINDU': None,
'DEVANAGARI VOWEL SIGN CANDRA LONG E': None,
'DEVANAGARI VOWEL SIGN PRISHTHAMATRA E': None,
'DIAMOND WITH BOTTOM HALF BLACK': None,
'DIAMOND WITH LEFT HALF BLACK': None,
'DIAMOND WITH RIGHT HALF BLACK': None,
'DIAMOND WITH TOP HALF BLACK': None,
'DIGIT EIGHT COMMA': None,
'DIGIT FIVE COMMA': None,
'DIGIT FOUR COMMA': None,
'DIGIT NINE COMMA': None,
'DIGIT ONE COMMA': None,
'DIGIT SEVEN COMMA': None,
'DIGIT SIX COMMA': None,
'DIGIT THREE COMMA': None,
'DIGIT TWO COMMA': None,
'DIGIT ZERO COMMA': None,
'DIGIT ZERO FULL STOP': None,
'DIGRAM FOR EARTH': None,
'DIGRAM FOR EARTHLY HEAVEN': None,
'DIGRAM FOR EARTHLY HUMAN': None,
'DIGRAM FOR GREATER YANG': None,
'DIGRAM FOR GREATER YIN': None,
'DIGRAM FOR HEAVENLY EARTH': None,
'DIGRAM FOR HUMAN EARTH': None,
'DIGRAM FOR LESSER YANG': None,
'DIGRAM FOR LESSER YIN': None,
'DISABLED CAR': None,
'DIVORCE SYMBOL': None,
'DOMINO TILE HORIZONTAL BACK': None,
'DOMINO TILE HORIZONTAL-00-00': None,
'DOMINO TILE HORIZONTAL-00-01': None,
'DOMINO TILE HORIZONTAL-00-02': None,
'DOMINO TILE HORIZONTAL-00-03': None,
'DOMINO TILE HORIZONTAL-00-04': None,
'DOMINO TILE HORIZONTAL-00-05': None,
'DOMINO TILE HORIZONTAL-00-06': None,
'DOMINO TILE HORIZONTAL-01-00': None,
'DOMINO TILE HORIZONTAL-01-01': None,
'DOMINO TILE HORIZONTAL-01-02': None,
'DOMINO TILE HORIZONTAL-01-03': None,
'DOMINO TILE HORIZONTAL-01-04': None,
'DOMINO TILE HORIZONTAL-01-05': None,
'DOMINO TILE HORIZONTAL-01-06': None,
'DOMINO TILE HORIZONTAL-02-00': None,
'DOMINO TILE HORIZONTAL-02-01': None,
'DOMINO TILE HORIZONTAL-02-02': None,
'DOMINO TILE HORIZONTAL-02-03': None,
'DOMINO TILE HORIZONTAL-02-04': None,
'DOMINO TILE HORIZONTAL-02-05': None,
'DOMINO TILE HORIZONTAL-02-06': None,
'DOMINO TILE HORIZONTAL-03-00': None,
'DOMINO TILE HORIZONTAL-03-01': None,
'DOMINO TILE HORIZONTAL-03-02': None,
'DOMINO TILE HORIZONTAL-03-03': None,
'DOMINO TILE HORIZONTAL-03-04': None,
'DOMINO TILE HORIZONTAL-03-05': None,
'DOMINO TILE HORIZONTAL-03-06': None,
'DOMINO TILE HORIZONTAL-04-00': None,
'DOMINO TILE HORIZONTAL-04-01': None,
'DOMINO TILE HORIZONTAL-04-02': None,
'DOMINO TILE HORIZONTAL-04-03': None,
'DOMINO TILE HORIZONTAL-04-04': None,
'DOMINO TILE HORIZONTAL-04-05': None,
'DOMINO TILE HORIZONTAL-04-06': None,
'DOMINO TILE HORIZONTAL-05-00': None,
'DOMINO TILE HORIZONTAL-05-01': None,
'DOMINO TILE HORIZONTAL-05-02': None,
'DOMINO TILE HORIZONTAL-05-03': None,
'DOMINO TILE HORIZONTAL-05-04': None,
'DOMINO TILE HORIZONTAL-05-05': None,
'DOMINO TILE HORIZONTAL-05-06': None,
'DOMINO TILE HORIZONTAL-06-00': None,
'DOMINO TILE HORIZONTAL-06-01': None,
'DOMINO TILE HORIZONTAL-06-02': None,
'DOMINO TILE HORIZONTAL-06-03': None,
'DOMINO TILE HORIZONTAL-06-04': None,
'DOMINO TILE HORIZONTAL-06-05': None,
'DOMINO TILE HORIZONTAL-06-06': None,
'DOMINO TILE VERTICAL BACK': None,
'DOMINO TILE VERTICAL-00-00': None,
'DOMINO TILE VERTICAL-00-01': None,
'DOMINO TILE VERTICAL-00-02': None,
'DOMINO TILE VERTICAL-00-03': None,
'DOMINO TILE VERTICAL-00-04': None,
'DOMINO TILE VERTICAL-00-05': None,
'DOMINO TILE VERTICAL-00-06': None,
'DOMINO TILE VERTICAL-01-00': None,
'DOMINO TILE VERTICAL-01-01': None,
'DOMINO TILE VERTICAL-01-02': None,
'DOMINO TILE VERTICAL-01-03': None,
'DOMINO TILE VERTICAL-01-04': None,
'DOMINO TILE VERTICAL-01-05': None,
'DOMINO TILE VERTICAL-01-06': None,
'DOMINO TILE VERTICAL-02-00': None,
'DOMINO TILE VERTICAL-02-01': None,
'DOMINO TILE VERTICAL-02-02': None,
'DOMINO TILE VERTICAL-02-03': None,
'DOMINO TILE VERTICAL-02-04': None,
'DOMINO TILE VERTICAL-02-05': None,
'DOMINO TILE VERTICAL-02-06': None,
'DOMINO TILE VERTICAL-03-00': None,
'DOMINO TILE VERTICAL-03-01': None,
'DOMINO TILE VERTICAL-03-02': None,
'DOMINO TILE VERTICAL-03-03': None,
'DOMINO TILE VERTICAL-03-04': None,
'DOMINO TILE VERTICAL-03-05': None,
'DOMINO TILE VERTICAL-03-06': None,
'DOMINO TILE VERTICAL-04-00': None,
'DOMINO TILE VERTICAL-04-01': None,
'DOMINO TILE VERTICAL-04-02': None,
'DOMINO TILE VERTICAL-04-03': None,
'DOMINO TILE VERTICAL-04-04': None,
'DOMINO TILE VERTICAL-04-05': None,
'DOMINO TILE VERTICAL-04-06': None,
'DOMINO TILE VERTICAL-05-00': None,
'DOMINO TILE VERTICAL-05-01': None,
'DOMINO TILE VERTICAL-05-02': None,
'DOMINO TILE VERTICAL-05-03': None,
'DOMINO TILE VERTICAL-05-04': None,
'DOMINO TILE VERTICAL-05-05': None,
'DOMINO TILE VERTICAL-05-06': None,
'DOMINO TILE VERTICAL-06-00': None,
'DOMINO TILE VERTICAL-06-01': None,
'DOMINO TILE VERTICAL-06-02': None,
'DOMINO TILE VERTICAL-06-03': None,
'DOMINO TILE VERTICAL-06-04': None,
'DOMINO TILE VERTICAL-06-05': None,
'DOMINO TILE VERTICAL-06-06': None,
'DOTTED CROSS': None,
'DOTTED OBELOS': None,
'DOTTED RIGHT-POINTING ANGLE': None,
'DOTTED SQUARE': None,
'DOTTED TRANSPOSITION MARKER': None,
'DOUBLE OBLIQUE HYPHEN': None,
'DOUBLE-STRUCK SMALL PI': None,
'DOUBLED FEMALE SIGN': None,
'DOUBLED MALE SIGN': None,
'DOWNWARDS ANCORA': None,
'DOWNWARDS BLACK ARROW': None,
'DRIVE SLOW SIGN': None,
'EARTH GROUND': None,
'EDITORIAL CORONIS': None,
'EGYPTIAN HIEROGLYPH A001': None,
'EGYPTIAN HIEROGLYPH A002': None,
'EGYPTIAN HIEROGLYPH A003': None,
'EGYPTIAN HIEROGLYPH A004': None,
'EGYPTIAN HIEROGLYPH A005': None,
'EGYPTIAN HIEROGLYPH A005A': None,
'EGYPTIAN HIEROGLYPH A006': None,
'EGYPTIAN HIEROGLYPH A006A': None,
'EGYPTIAN HIEROGLYPH A006B': None,
'EGYPTIAN HIEROGLYPH A007': None,
'EGYPTIAN HIEROGLYPH A008': None,
'EGYPTIAN HIEROGLYPH A009': None,
'EGYPTIAN HIEROGLYPH A010': None,
'EGYPTIAN HIEROGLYPH A011': None,
'EGYPTIAN HIEROGLYPH A012': None,
'EGYPTIAN HIEROGLYPH A013': None,
'EGYPTIAN HIEROGLYPH A014': None,
'EGYPTIAN HIEROGLYPH A014A': None,
'EGYPTIAN HIEROGLYPH A015': None,
'EGYPTIAN HIEROGLYPH A016': None,
'EGYPTIAN HIEROGLYPH A017': None,
'EGYPTIAN HIEROGLYPH A017A': None,
'EGYPTIAN HIEROGLYPH A018': None,
'EGYPTIAN HIEROGLYPH A019': None,
'EGYPTIAN HIEROGLYPH A020': None,
'EGYPTIAN HIEROGLYPH A021': None,
'EGYPTIAN HIEROGLYPH A022': None,
'EGYPTIAN HIEROGLYPH A023': None,
'EGYPTIAN HIEROGLYPH A024': None,
'EGYPTIAN HIEROGLYPH A025': None,
'EGYPTIAN HIEROGLYPH A026': None,
'EGYPTIAN HIEROGLYPH A027': None,
'EGYPTIAN HIEROGLYPH A028': None,
'EGYPTIAN HIEROGLYPH A029': None,
'EGYPTIAN HIEROGLYPH A030': None,
'EGYPTIAN HIEROGLYPH A031': None,
'EGYPTIAN HIEROGLYPH A032': None,
'EGYPTIAN HIEROGLYPH A032A': None,
'EGYPTIAN HIEROGLYPH A033': None,
'EGYPTIAN HIEROGLYPH A034': None,
'EGYPTIAN HIEROGLYPH A035': None,
'EGYPTIAN HIEROGLYPH A036': None,
'EGYPTIAN HIEROGLYPH A037': None,
'EGYPTIAN HIEROGLYPH A038': None,
'EGYPTIAN HIEROGLYPH A039': None,
'EGYPTIAN HIEROGLYPH A040': None,
'EGYPTIAN HIEROGLYPH A040A': None,
'EGYPTIAN HIEROGLYPH A041': None,
'EGYPTIAN HIEROGLYPH A042': None,
'EGYPTIAN HIEROGLYPH A042A': None,
'EGYPTIAN HIEROGLYPH A043': None,
'EGYPTIAN HIEROGLYPH A043A': None,
'EGYPTIAN HIEROGLYPH A044': None,
'EGYPTIAN HIEROGLYPH A045': None,
'EGYPTIAN HIEROGLYPH A045A': None,
'EGYPTIAN HIEROGLYPH A046': None,
'EGYPTIAN HIEROGLYPH A047': None,
'EGYPTIAN HIEROGLYPH A048': None,
'EGYPTIAN HIEROGLYPH A049': None,
'EGYPTIAN HIEROGLYPH A050': None,
'EGYPTIAN HIEROGLYPH A051': None,
'EGYPTIAN HIEROGLYPH A052': None,
'EGYPTIAN HIEROGLYPH A053': None,
'EGYPTIAN HIEROGLYPH A054': None,
'EGYPTIAN HIEROGLYPH A055': None,
'EGYPTIAN HIEROGLYPH A056': None,
'EGYPTIAN HIEROGLYPH A057': None,
'EGYPTIAN HIEROGLYPH A058': None,
'EGYPTIAN HIEROGLYPH A059': None,
'EGYPTIAN HIEROGLYPH A060': None,
'EGYPTIAN HIEROGLYPH A061': None,
'EGYPTIAN HIEROGLYPH A062': None,
'EGYPTIAN HIEROGLYPH A063': None,
'EGYPTIAN HIEROGLYPH A064': None,
'EGYPTIAN HIEROGLYPH A065': None,
'EGYPTIAN HIEROGLYPH A066': None,
'EGYPTIAN HIEROGLYPH A067': None,
'EGYPTIAN HIEROGLYPH A068': None,
'EGYPTIAN HIEROGLYPH A069': None,
'EGYPTIAN HIEROGLYPH A070': None,
'EGYPTIAN HIEROGLYPH AA001': None,
'EGYPTIAN HIEROGLYPH AA002': None,
'EGYPTIAN HIEROGLYPH AA003': None,
'EGYPTIAN HIEROGLYPH AA004': None,
'EGYPTIAN HIEROGLYPH AA005': None,
'EGYPTIAN HIEROGLYPH AA006': None,
'EGYPTIAN HIEROGLYPH AA007': None,
'EGYPTIAN HIEROGLYPH AA007A': None,
'EGYPTIAN HIEROGLYPH AA007B': None,
'EGYPTIAN HIEROGLYPH AA008': None,
'EGYPTIAN HIEROGLYPH AA009': None,
'EGYPTIAN HIEROGLYPH AA010': None,
'EGYPTIAN HIEROGLYPH AA011': None,
'EGYPTIAN HIEROGLYPH AA012': None,
'EGYPTIAN HIEROGLYPH AA013': None,
'EGYPTIAN HIEROGLYPH AA014': None,
'EGYPTIAN HIEROGLYPH AA015': None,
'EGYPTIAN HIEROGLYPH AA016': None,
'EGYPTIAN HIEROGLYPH AA017': None,
'EGYPTIAN HIEROGLYPH AA018': None,
'EGYPTIAN HIEROGLYPH AA019': None,
'EGYPTIAN HIEROGLYPH AA020': None,
'EGYPTIAN HIEROGLYPH AA021': None,
'EGYPTIAN HIEROGLYPH AA022': None,
'EGYPTIAN HIEROGLYPH AA023': None,
'EGYPTIAN HIEROGLYPH AA024': None,
'EGYPTIAN HIEROGLYPH AA025': None,
'EGYPTIAN HIEROGLYPH AA026': None,
'EGYPTIAN HIEROGLYPH AA027': None,
'EGYPTIAN HIEROGLYPH AA028': None,
'EGYPTIAN HIEROGLYPH AA029': None,
'EGYPTIAN HIEROGLYPH AA030': None,
'EGYPTIAN HIEROGLYPH AA031': None,
'EGYPTIAN HIEROGLYPH AA032': None,
'EGYPTIAN HIEROGLYPH B001': None,
'EGYPTIAN HIEROGLYPH B002': None,
'EGYPTIAN HIEROGLYPH B003': None,
'EGYPTIAN HIEROGLYPH B004': None,
'EGYPTIAN HIEROGLYPH B005': None,
'EGYPTIAN HIEROGLYPH B005A': None,
'EGYPTIAN HIEROGLYPH B006': None,
'EGYPTIAN HIEROGLYPH B007': None,
'EGYPTIAN HIEROGLYPH B008': None,
'EGYPTIAN HIEROGLYPH B009': None,
'EGYPTIAN HIEROGLYPH C001': None,
'EGYPTIAN HIEROGLYPH C002': None,
'EGYPTIAN HIEROGLYPH C002A': None,
'EGYPTIAN HIEROGLYPH C002B': None,
'EGYPTIAN HIEROGLYPH C002C': None,
'EGYPTIAN HIEROGLYPH C003': None,
'EGYPTIAN HIEROGLYPH C004': None,
'EGYPTIAN HIEROGLYPH C005': None,
'EGYPTIAN HIEROGLYPH C006': None,
'EGYPTIAN HIEROGLYPH C007': None,
'EGYPTIAN HIEROGLYPH C008': None,
'EGYPTIAN HIEROGLYPH C009': None,
'EGYPTIAN HIEROGLYPH C010': None,
'EGYPTIAN HIEROGLYPH C010A': None,
'EGYPTIAN HIEROGLYPH C011': None,
'EGYPTIAN HIEROGLYPH C012': None,
'EGYPTIAN HIEROGLYPH C013': None,
'EGYPTIAN HIEROGLYPH C014': None,
'EGYPTIAN HIEROGLYPH C015': None,
'EGYPTIAN HIEROGLYPH C016': None,
'EGYPTIAN HIEROGLYPH C017': | |
<reponame>wangjc640/532-Group21
"""
This file contains all the components of the dashboard including layout,
control filters and altair plots.
"""
import dash
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
import altair as alt
import dash_bootstrap_components as dbc
import pandas as pd
from vega_datasets import data as datasets
# import controls as ctrs
from src.dashboard import controls as ctrs
# Read in global data
# NOTE(review): path is relative to the working directory — run from repo root.
gapminder = pd.read_csv("data/processed/gapminder_processed.csv", parse_dates=["year"])
# create clean country list: unique (name, id) pairs, merged into the map
# data so every country id can be rendered even when filtered out
country_list = gapminder[["name", "id"]].drop_duplicates()
# Create dictionary for stat labels: maps dataframe column names to the
# human-readable labels used in chart titles and axes
labels = {
    "life_expectancy": "Life Expectancy",
    "education_ratio": "Education Ratio",
    "pop_density": "Population Density",
    "child_mortality": "Child Mortality",
    "children_per_woman": "Children per Woman",
}
# Setup app and layout/frontend
app = dash.Dash(__name__, title = "GapExpresser", external_stylesheets=[dbc.themes.BOOTSTRAP])
server = app.server  # exposed for WSGI deployment (e.g. gunicorn/Heroku)
# Left-hand control panel: each FormGroup wraps one filter widget from ctrs
controls = dbc.Card(
    [
        # control panel title
        html.H2("Control Panel", className="text-center"),
        # filter for Statistic of Interest
        html.Hr(),
        dbc.FormGroup(
            [
                html.H5("1. Statistic of Interest", className="text-left"),
                ctrs.stat,
            ]
        ),
        html.Hr(),
        # filter for Region
        dbc.FormGroup(
            [
                html.H5("2. Region", className="text-left"),
                ctrs.region,
            ]
        ),
        html.Hr(),
        # filter for Sub Region
        dbc.FormGroup(
            [html.H5("3. Sub Region", className="text-left"), ctrs.sub_region]
        ),
        html.Hr(),
        # filter for Income Group
        dbc.FormGroup(
            [html.H5("4. Income Group", className="text-left"), ctrs.income_grp]
        ),
        html.Hr(),
        # filter for population size
        dbc.FormGroup(
            [html.H5("5. Population Size", className="text-left"), ctrs.pop_size]
        ),
        html.Hr(),
        # filter for year
        dbc.FormGroup([html.H5("6. Year", className="text-left"), ctrs.year]),
        html.Hr(),
        # filter for top/bottom 5 countries
        dbc.FormGroup([html.H5("7. Show me", className="text-left"), ctrs.top_btm]),
        html.Small(
            "* Education Ratio calculated as # of years in school men / # of years in school women. Higher values indicate larger gap between the education levels for men and women."
        ),
        html.Small(
            "** Population Density (per square km). Average number of people on each square km of land in the given country. "
        ),
    ],
    color="secondary",
    inverse=True,
    body=True,
)
# Iframe placeholders for the three Altair charts; each srcDoc is filled in
# by the corresponding callback below (plot_map, plot_bar, plot_line).
world_map = html.Iframe(
    id="world_map",
    style={
        "border-width": "0",
        "width": "100%",
        "height": "600px",
    },
)
# bar chart of the top/bottom 5 countries for the selected year
bar = html.Iframe(
    id="bar",
    style={
        "border-width": "0",
        "width": "100%",
        "height": "400px",
    },
)
# line chart of the top/bottom 5 countries over the selected year range
line = html.Iframe(
    id="line",
    style={
        "border-width": "0",
        "width": "100%",
        "height": "400px",
    },
)
# Top-level page layout: title, then controls (left, 4/12) beside the charts
# (right, 8/12: map on top, bar and line side by side underneath).
app.layout = dbc.Container(
    [
        html.Div(
            style={
                "textAlign": "center",
                "color": "DarkSlateGray",
                "font-size": "26px",
            },
            children=[
                html.H1("GapExpresser"),
            ],
        ),
        html.Hr(),
        dbc.Row(
            [
                dbc.Col(controls, md=4),
                dbc.Col(
                    [
                        dbc.Row(world_map, align="center"),
                        dbc.Row([dbc.Col([bar], md=6), dbc.Col([line], md=6)]),
                        html.Small(
                            "Note: empty plots mean that we don't have data based on your selection"
                        ),
                    ],
                    md=8,
                ),
            ],
            align="center",
        ),
    ],
    fluid=True,
)
# Set up callbacks/backend
@app.callback(
    Output("sub_region", "options"),
    Input("region", "value"),
)
def get_subregion(region):
    """Populate the Sub Region dropdown options from the Region selection.

    Parameters
    ----------
    region : str or None
        Selection from the Region filter; None means no region is selected.

    Returns
    -------
    list of dict
        Dropdown options ({"label": ..., "value": ...}) for the sub regions
        of the selected region, or for all sub regions when none is selected.

    Example
    -------
    > get_subregion("Asia")
    """
    if region is not None:
        subs = gapminder[gapminder["region"] == region]["sub_region"].unique()
    else:
        subs = gapminder["sub_region"].unique()
    # Same option shape for both branches; the original built these with a
    # manual append loop in one branch and a comprehension in the other.
    return [{"label": s, "value": s} for s in subs]
# Set up callbacks/backend
@app.callback(
    Output("world_map", "srcDoc"),
    Input("stat", "value"),
    Input("region", "value"),
    Input("sub_region", "value"),
    Input("income_grp", "value"),
    Input("pop_size", "value"),
    Input("year", "value"),
)
def plot_map(stat, region, sub_region, income_grp, pop_size, year):
    """Create the choropleth map for the statistic of interest.

    Parameters
    ----------
    stat : str
        Selection from statistic of interest filter
    region : str or None
        Selection from the Region filter; None zooms out to the whole world
    sub_region : str
        Selection from Sub Region filter
    income_grp : str
        Selection from Income Group filter
    pop_size : list of int
        [min, max] population range from the Population Size filter
    year : list of int
        [start, end] years from the Year filter; the map shows year[1]

    Returns
    -------
    str
        HTML for a map chart showing the statistic of interest for the
        selected region, sub region, income group and year.

    Example
    -------
    > plot_map("education_ratio", "Asia", "Western Asia", "Lower middle", [10_000, 1_000_000], [1968, 2015])
    """
    alt.data_transformers.disable_max_rows()
    data = filter_data(region, sub_region, income_grp)
    data = filter_popsize(data, pop_size)
    data = data[(data["year"] == f"{year[1]}")]
    # append clean country list so every country id is present in the lookup
    data = data.merge(country_list, how="outer", on=["name", "id"])
    # replace NaN values with -1 so filtered-out countries still render
    data[[stat]] = data[[stat]].fillna(-1)
    # create world_map
    world_map = alt.topo_feature(datasets.world_110m.url, "countries")
    base = (
        alt.Chart(world_map, title=f"{labels[stat]} by Country for {year[1]}")
        .mark_geoshape(stroke="black")
        .transform_lookup(lookup="id", from_=alt.LookupData(data, key="id", fields=["name", stat]))
        .encode(tooltip=["name:O", stat + ":Q"], color=alt.Color(stat + ":Q", title=f"{labels[stat]}"))
        .configure_title(fontSize=24)
        .configure_legend(labelFontSize=12)
    )
    if region is None:
        # Whole-world view
        map_chart = base.project(type="equalEarth").properties(width=1000, height=500)
    else:
        # Hand-tuned (scale, translate) per region to centre/zoom the view;
        # unknown regions fall back to (None, None), as the original if-chain did.
        projection_params = {
            "Europe": (800, [150, 1010]),
            "Asia": (500, [-200, 500]),
            "Africa": (500, [400, 300]),
            "Americas": (300, [1000, 350]),
            "Oceania": (500, [-400, 0]),
        }
        scale, translate = projection_params.get(region, (None, None))
        map_chart = (base
                     .project(type='naturalEarth1', scale=scale, translate=translate)
                     .properties(width=1000, height=700))
    return map_chart.to_html()
@app.callback(
    Output("bar", "srcDoc"),
    Input("stat", "value"),
    Input("region", "value"),
    Input("sub_region", "value"),
    Input("income_grp", "value"),
    Input("top_btm", "value"),
    Input("pop_size", "value"),
    Input("year", "value"),
)
def plot_bar(stat, region, sub_region, income_grp, top_btm, pop_size, year):
    """Create the bar chart for the statistic of interest (top/bottom 5).

    Parameters
    ----------
    stat : str
        Selection from statistic of interest filter
    region : str
        Selection from the Region filter
    sub_region : str
        Selection from Sub Region filter
    income_grp : str
        Selection from Income Group filter
    top_btm : str
        Selection from Top/Bottom filter
    pop_size : list of int
        [min, max] population range from the Population Size filter
    year : list of int
        [start, end] years from the Year filter; the bar chart uses year[1]

    Returns
    -------
    str
        HTML for a bar chart showing the statistic of interest for the top 5
        or bottom 5 countries in the selected region, sub region, income
        group and year.

    Example
    -------
    > plot_bar("education_ratio", "Asia", "Western Asia", "Lower middle", "Bottom", [10_000, 1_000_000], [1968, 2015])
    """
    alt.data_transformers.disable_max_rows()
    # filter by Region, sub-region & Income group
    data = filter_data(region, sub_region, income_grp)
    # filter on pop_size
    data = filter_popsize(data, pop_size)
    # filter on year: the bar chart shows only the last year of the range
    data = data[(data["year"] == f"{year[1]}")]
    # filter on top/bottom selection
    data = get_topbtm_data(data, stat, top_btm, year)
    chart = (
        alt.Chart(
            data,
            title=f"{labels[stat]} - {top_btm} 5 Countries for {year[1]}",
        )
        .mark_bar()
        .encode(
            y=alt.Y("country", sort="-x", title="Country"),
            x=alt.X(stat, title=labels[stat]),
            color=alt.Color(
                "country",
                sort=alt.EncodingSortField("country", order="descending"),
                title="Country",
            ),
            tooltip=("name:O", stat + ":Q"),
        )
        .configure_axis(labelFontSize=12, titleFontSize=14)
        .configure_title(fontSize=15)
        .configure_legend(labelFontSize=12)
        .properties(width=400, height=300)
    )
    return chart.to_html()
@app.callback(
    Output("line", "srcDoc"),
    Input("stat", "value"),
    Input("region", "value"),
    Input("sub_region", "value"),
    Input("income_grp", "value"),
    Input("top_btm", "value"),
    Input("pop_size", "value"),
    Input("year", "value"),
)
def plot_line(stat, region, sub_region, income_grp, top_btm, pop_size, year):
    """Create the line chart for the statistic of interest (top/bottom 5).

    Parameters
    ----------
    stat : str
        Selection from statistic of interest filter
    region : str
        Selection from the Region filter
    sub_region : str
        Selection from Sub Region filter
    income_grp : str
        Selection from Income Group filter
    top_btm : str
        Selection from Top/Bottom filter
    pop_size : list of int
        [min, max] population range from the Population Size filter
    year : list of int
        [start, end] years from the Year filter

    Returns
    -------
    str
        HTML for a line chart showing the statistic of interest for the top 5
        or bottom 5 countries in the selected region, sub region and income
        group over the selected year range.

    Example
    -------
    > plot_line("education_ratio", "Asia", "Western Asia", "Lower middle", "Bottom", [10_000, 1_000_000], [1968, 2015])
    """
    alt.data_transformers.disable_max_rows()
    # filter by Region, sub-region & Income group
    data = filter_data(region, sub_region, income_grp)
    # filter on pop_size
    data = filter_popsize(data, pop_size)
    # filter on top/bottom selection
    data = get_topbtm_data(data, stat, top_btm, year)
    # filter on year range
    data = data[(data["year"] >= f"{year[0]}") & (data["year"] <= f"{year[1]}")]
    # Fix: removed leftover debug line `if sub_region is not None: data = stat`
    # which replaced the DataFrame with the stat *string* and broke the chart
    # whenever a sub region was selected.
    zoom = alt.selection_interval(
        bind="scales",
        on="[mousedown[!event.shiftKey], mouseup] > mousemove",
        translate="[mousedown[!event.shiftKey], mouseup] > mousemove!",
    )
    line = (
        alt.Chart(
            data,
            title=f"{labels[stat]} Trend - {top_btm} 5 Countries from {year[0]} - {year[1]}",
        )
        .mark_line()
        .encode(
            alt.X("year:T", title="Year"),
            alt.Y(stat, title=labels[stat]),
            color=alt.Color(
                "country",
                sort=alt.EncodingSortField("country", order="descending"),
                title="Country",
            ),
            tooltip=("name:O", stat + ":Q"),
        )
        .configure_axis(labelFontSize=12, titleFontSize=14)
        .configure_title(fontSize=15)
        .configure_legend(labelFontSize=12)
        .properties(width=400, height=300)
    ).add_selection(zoom)
    return line.to_html()
def get_topbtm_data(data, stat, top_btm, year):
"""
Filter data based on top 5 or bottom 5 countries selection
Parameters
--------
data: pandas dataframe
Data to be filtered
stat: string
Selection from statistic of interest filter
top_btm: string
Selection from Top/Bottom filter
year: integer
Year for which | |
# Vincenzo begin
#(assign,":should_spawn",1),
(try_begin),
(gt,":player_team_entrypoints_count",0), # More then 0 entry points are found.
(try_begin),
#(troop_get_slot,":value","trp_conquest_spawn_dummy",":player_no"),
(player_get_slot,":flag_id",":player_no",slot_player_selected_flag),
(store_add, ":cur_flag_slot", multi_data_flag_owner_begin, ":flag_id"),
(troop_get_slot, ":current_owner", "trp_multiplayer_data", ":cur_flag_slot"),
(store_add,":player_team_plus_1",":player_team",1),
# (assign,reg4,":value"),
# (assign,reg3,":player_team_plus_1"),
# (assign,reg2,":current_owner"),
# (str_store_string,s4,"@Selected Flag Owner: {reg2} Player Team plus 1: {reg3} Flag value: {reg4}"),
# (call_script, "script_multiplayer_broadcast_message"),
(eq,":player_team_plus_1",":current_owner"), # we have a flaggy for us selected =)
(store_mul,":current_flag_slot",":flag_id",50), # each 50 slots containt entry points for a flag.
(troop_get_slot, ":entry_point_count", "trp_entrypoints_per_flag_dummy", ":current_flag_slot"),
(val_add,":current_flag_slot",1),
(val_add,":entry_point_count",":current_flag_slot"),
#(val_add,":entry_point_count",1),
(store_random_in_range, ":spawn_entry_no", ":current_flag_slot", ":entry_point_count"),
(troop_get_slot, ":entry_point", "trp_entrypoints_per_flag_dummy", ":spawn_entry_no"),
# (assign,reg1,":flag_id"),
# (assign,reg2,":entry_point_count"),
# (assign,reg3,":spawn_entry_no"),
# (assign,reg4,":current_flag_slot"),
# (assign,reg5,":entry_point"),
# (str_store_string,s4,"@flag_id:{reg1} current_flag_slot+1:{reg4} entry_point_count:{reg2} spawn_entry_no:{reg3} thats entry_point:{reg5}"),
# (call_script, "script_multiplayer_broadcast_message"),
(else_try),
(store_random_in_range, ":spawn_entry_no", ":start_point", ":player_team_entrypoints"),
(troop_get_slot, ":entry_point", "trp_entrypoints_dummy", ":spawn_entry_no"),
(try_end),
(assign, reg0, ":entry_point"), # assign that bitch =)
(else_try),
# (str_store_player_username, s9, ":player_no"),
# (assign, reg9, ":spawn_near_flag"),
# (str_store_string, s4, "@WARNING! NO ENTRY POINT FOUND FOR PLAYER: {s9}"),
# (call_script, "script_multiplayer_broadcast_message"), # Broadcast message
(troop_get_inventory_slot, ":has_item", ":player_troop", ek_horse),
(try_begin),
(ge, ":has_item", 0),
(assign, ":is_horseman", 1),
(else_try),
(assign, ":is_horseman", 0),
(try_end),
# No entry points found, cryface bad shitty sucky map, just run the native spawn code I guess :(
(call_script, "script_multiplayer_find_spawn_point", ":player_team", 0, ":is_horseman"),
(try_end),
# Vincenzo end
#(eq,":should_spawn",1),
(player_spawn_new_agent, ":player_no", reg0),
(try_end),
# Vincenzo begin
(store_mission_timer_a, "$g_hq_last_spawn_wave"),
# Vincenzo end
]),
(1, 0, 0, [ (multiplayer_is_server),
(this_or_next|gt,"$g_multiplayer_num_bots_team_1",0),
(gt,"$g_multiplayer_num_bots_team_2",0), # are there any bots? :p
], #do this in every new frame, but not at the same time
[
(store_mission_timer_a, ":mission_timer"),
(ge, ":mission_timer", 2),
(assign, ":team_1_count", 0),
(assign, ":team_2_count", 0),
(try_for_agents, ":cur_agent"),
(agent_is_active, ":cur_agent"),
(agent_is_non_player, ":cur_agent"),
(agent_is_human, ":cur_agent"),
(assign, ":will_be_counted", 0),
(try_begin),
(agent_is_alive, ":cur_agent"),
(assign, ":will_be_counted", 1), #alive so will be counted
(else_try),
(agent_get_time_elapsed_since_removed, ":elapsed_time", ":cur_agent"),
(le, ":elapsed_time", "$g_multiplayer_respawn_period"),
(assign, ":will_be_counted", 1), #new died (< g_multiplayer_respawn_period) so will be counted too
(try_end),
(eq, ":will_be_counted", 1),
(agent_get_team, ":cur_team", ":cur_agent"),
(try_begin),
(eq, ":cur_team", 0),
(val_add, ":team_1_count", 1),
(else_try),
(eq, ":cur_team", 1),
(val_add, ":team_2_count", 1),
(try_end),
(try_end),
(store_sub, "$g_multiplayer_num_bots_required_team_1", "$g_multiplayer_num_bots_team_1", ":team_1_count"),
(store_sub, "$g_multiplayer_num_bots_required_team_2", "$g_multiplayer_num_bots_team_2", ":team_2_count"),
(val_max, "$g_multiplayer_num_bots_required_team_1", 0),
(val_max, "$g_multiplayer_num_bots_required_team_2", 0),
]),
# Beaver added end mission code
(1, 0, 5, [ (this_or_next|eq,"$g_conquest_map_end_confirm",0),
(eq,"$g_conquest_map_end_confirm",2),
],
[
(store_mission_timer_a,":timer"),
(gt,":timer",10),
(try_begin),
(eq,"$g_conquest_map_end_confirm",0),
(team_get_score,":team_1_score", 0),
(team_get_score,":team_2_score", 1),
(this_or_next|le, ":team_1_score", 0),
(le, ":team_2_score", 0),
(try_begin),
(neg|multiplayer_is_dedicated_server),
(start_presentation,"prsnt_message_conquest_round_ended"),
(try_end),
(assign,"$g_conquest_map_end_confirm",2),
(else_try),
(eq,"$g_conquest_map_end_confirm",2),
(assign,"$g_conquest_map_end_confirm",1),
(try_end),
]),
multiplayer_server_spawn_bots,
multiplayer_server_manage_bots,
# Vincenzo change seconds
(30, 0, 0, [(multiplayer_is_server),],
[
#auto team balance control in every 30 seconds (hq)
(call_script, "script_check_team_balance"),
]),
multiplayer_server_check_end_map,
(0, 0, 0, [(neg|multiplayer_is_dedicated_server),(key_clicked,key_m)],
[
(try_begin),
(neg|is_presentation_active,"prsnt_conquest_flag_select"),
(start_presentation,"prsnt_conquest_flag_select"),
(try_end),
]),
(ti_tab_pressed, 0, 0, [],
[
(try_begin),
(eq, "$g_multiplayer_mission_end_screen", 0),
(assign, "$g_multiplayer_stats_chart_opened_manually", 1),
(start_presentation, "prsnt_multiplayer_stats_chart"),
(try_end),
]),
multiplayer_once_at_the_first_frame,
(ti_escape_pressed, 0, 0, [],
[
(neg|is_presentation_active, "prsnt_multiplayer_escape_menu"),
(neg|is_presentation_active, "prsnt_multiplayer_stats_chart"),
(eq, "$g_waiting_for_confirmation_to_terminate", 0),
(start_presentation, "prsnt_multiplayer_escape_menu"),
]),
] + mm_multiplayer_common,
),
(
"multiplayer_cf",mtf_battle_mode,-1, #capture_the_flag mode
"You lead your men to battle.",
[
(0,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(1,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(2,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(3,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(4,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(5,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(6,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(7,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(8,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(9,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(10,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(11,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(12,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(13,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(14,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(15,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(16,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(17,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(18,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(19,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(20,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(21,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(22,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(23,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(24,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(25,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(26,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(27,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(28,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(29,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(30,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(31,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(32,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(33,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(34,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(35,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(36,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(37,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(38,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(39,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(40,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(41,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(42,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(43,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(44,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(45,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(46,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(47,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(48,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(49,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(50,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(51,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(52,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(53,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(54,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(55,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(56,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(57,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(58,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(59,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(60,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(61,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(62,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(63,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(64,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(65,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
],
[
common_battle_init_banner,
multiplayer_server_check_polls, multiplayer_server_generate_build_points,
multiplayer_server_bonuses,
(ti_on_agent_spawn, 0, 0, [],
[
(store_trigger_param_1, ":agent_no"),
(call_script, "script_multiplayer_server_on_agent_spawn_common", ":agent_no"),
]),
(ti_server_player_joined, 0, 0, [],
[
(store_trigger_param_1, ":player_no"),
(call_script, "script_multiplayer_server_player_joined_common", ":player_no"),
]),
(ti_before_mission_start, 0, 0, [],
[
(try_begin),
(multiplayer_is_server),
(store_current_scene, ":cur_scene"),
(this_or_next|eq, ":cur_scene", "scn_random_multi_plain_medium"),
(this_or_next|eq, ":cur_scene", "scn_random_multi_plain_large"),
(this_or_next|eq, ":cur_scene", "scn_random_multi_steppe_medium"),
(eq, ":cur_scene", "scn_random_multi_steppe_large"),
(entry_point_get_position, pos0, 0),
(entry_point_set_position, 64, pos0),
(entry_point_get_position, pos1, 32),
(entry_point_set_position, 65, pos1),
(try_end),
(assign, "$g_multiplayer_game_type", multiplayer_game_type_capture_the_flag),
(call_script, "script_multiplayer_server_before_mission_start_common"),
(assign, "$flag_1_at_ground_timer", 0),
(assign, "$flag_2_at_ground_timer", 0),
(call_script, "script_multiplayer_init_mission_variables"),
(call_script, "script_multiplayer_remove_headquarters_flags"),
#MM
(call_script, "script_multiplayer_mm_before_mission_start_common"),
]),
(ti_after_mission_start, 0, 0, [],
[
(call_script, "script_determine_team_flags", 0),
(call_script, "script_determine_team_flags", 1),
(set_spawn_effector_scene_prop_kind, 0, -1), #during this mission, agents of "team 0" will try to spawn around scene props with kind equal to -1(no effector for this mod)
(set_spawn_effector_scene_prop_kind, 1, -1), #during this mission, agents of "team 1" will try to spawn around scene props with kind equal to -1(no effector for this mod)
(try_begin),
(multiplayer_is_server),
(assign, "$g_multiplayer_ready_for_spawning_agent", 1),
(entry_point_get_position, pos0, multi_base_point_team_1),
(set_spawn_position, pos0),
(spawn_scene_prop, "$team_1_flag_scene_prop", 0),
(entry_point_get_position, pos0, multi_base_point_team_2),
(set_spawn_position, pos0),
(spawn_scene_prop, "$team_2_flag_scene_prop", 0),
(try_end),
(call_script, "script_initialize_all_scene_prop_slots"),
(call_script, "script_multiplayer_move_moveable_objects_initial_positions"),
#MM
(call_script, "script_multiplayer_mm_after_mission_start_common"),
]),
(ti_on_multiplayer_mission_end, 0, 0, [],
[
(assign, "$g_multiplayer_stats_chart_opened_manually", 0),
(start_presentation, "prsnt_multiplayer_stats_chart"),
]),
(ti_on_agent_killed_or_wounded, 0, 0, [],
[
(store_trigger_param_1, ":dead_agent_no"),
(store_trigger_param_2, ":killer_agent_no"),
(call_script, "script_multiplayer_server_on_agent_killed_or_wounded_common", ":dead_agent_no", ":killer_agent_no"),
#when an agent dies which carrying a flag, assign flag position to current position with
#ground level z and do not change it again according to dead agent's any coordinate/rotation.
(try_begin),
(multiplayer_is_server),
(agent_is_human, ":dead_agent_no"),
(agent_get_attached_scene_prop, ":attached_scene_prop", ":dead_agent_no"),
(ge, ":attached_scene_prop", 0), #moved from above after auto-set position
#for only server itself-----------------------------------------------------------------------------------------------
(call_script, "script_set_attached_scene_prop", ":dead_agent_no", -1),
(agent_set_horse_speed_factor, ":dead_agent_no", 100),
#for only server itself-----------------------------------------------------------------------------------------------
(try_for_range, ":player_no", 1, multiplayer_player_loops_end), #0 is server so starting from 1
(player_is_active, ":player_no"),
(multiplayer_send_2_int_to_player, ":player_no", multiplayer_event_set_attached_scene_prop, ":dead_agent_no", -1),
(try_end),
(prop_instance_get_position, pos0, ":attached_scene_prop"), #moved from above to here after auto-set position
(position_set_z_to_ground_level, pos0), #moved from above to here after auto-set position
(prop_instance_set_position, ":attached_scene_prop", pos0), #moved from above to here after auto-set position
(agent_get_team, ":dead_agent_team", ":dead_agent_no"),
(try_begin),
(eq, ":dead_agent_team", 0),
(assign, ":dead_agent_rival_team", 1),
(else_try),
(assign, ":dead_agent_rival_team", 0),
(try_end),
(team_set_slot, ":dead_agent_rival_team", slot_team_flag_situation, 2), #2-flag at ground
#for only server itself-----------------------------------------------------------------------------------------------
(call_script, "script_set_team_flag_situation", ":dead_agent_rival_team", 2),
#for only server itself-----------------------------------------------------------------------------------------------
(try_for_range, ":player_no", 1, multiplayer_player_loops_end), #0 is server so starting from 1
(player_is_active, ":player_no"),
(multiplayer_send_2_int_to_player, ":player_no", multiplayer_event_set_team_flag_situation, ":dead_agent_rival_team", 2), #flag at ground
(try_end),
(try_end),
]),
(1, 0, 0, [(multiplayer_is_server),], #returning flag if it is not touched by anyone in 60 seconds
[
(try_for_range, ":team_no", 0, 2),
(try_begin),
(team_slot_eq, ":team_no", slot_team_flag_situation, 2),
(assign, ":flag_team_no", -1),
(try_begin),
(eq, ":team_no", 0),
(val_add, "$flag_1_at_ground_timer", 1),
(ge, "$flag_1_at_ground_timer", multi_max_seconds_flag_can_stay_in_ground),
(assign, ":flag_team_no", 0),
(else_try),
(val_add, "$flag_2_at_ground_timer", 1),
(ge, "$flag_2_at_ground_timer", multi_max_seconds_flag_can_stay_in_ground),
(assign, ":flag_team_no", 1),
(try_end),
(try_begin),
(ge, ":flag_team_no", 0),
(try_begin),
(eq, ":flag_team_no", 0),
(assign, "$flag_1_at_ground_timer", 0),
(else_try),
(eq, ":flag_team_no", 1),
(assign, "$flag_2_at_ground_timer", 0),
(try_end),
#cur agent returned his own flag to its default position!
(team_set_slot, ":flag_team_no", slot_team_flag_situation, 0), #0-flag at base
#return team flag to its starting position.
#for only server itself-----------------------------------------------------------------------------------------------
(call_script, "script_set_team_flag_situation", ":flag_team_no", 0),
#for only server itself-----------------------------------------------------------------------------------------------
(try_for_range, ":player_no", 1, multiplayer_player_loops_end), #0 is server so starting from 1
(player_is_active, ":player_no"),
(multiplayer_send_2_int_to_player, ":player_no", multiplayer_event_set_team_flag_situation, ":flag_team_no", 0),
(try_end),
(scene_prop_get_instance, ":flag_red_id", "$team_1_flag_scene_prop", 0),
(scene_prop_get_instance, ":flag_blue_id", "$team_2_flag_scene_prop", 0),
(assign, ":team_1_flag_id", ":flag_red_id"),
(assign, ":team_1_base_entry_id", multi_base_point_team_1),
(assign, ":team_2_flag_id", ":flag_blue_id"),
(assign, ":team_2_base_entry_id", multi_base_point_team_2),
#return team flag to its starting position.
(try_begin),
(eq, ":flag_team_no", 0),
(entry_point_get_position, pos5, ":team_1_base_entry_id"), #moved from above to here after auto-set position
(prop_instance_set_position, ":team_1_flag_id", pos5), #moved from above to here after auto-set position
(else_try),
(entry_point_get_position, pos5, ":team_2_base_entry_id"), #moved from above to here after auto-set position
(prop_instance_set_position, ":team_2_flag_id", pos5), #moved from above to here after auto-set position
(try_end),
#(team_get_faction, ":team_faction", ":flag_team_no"),
#(str_store_faction_name, s1, ":team_faction"),
#(tutorial_message_set_position, 500, 500),
#(tutorial_message_set_size, 30, 30),
#(tutorial_message_set_center_justify, 1),
#(tutorial_message, "str_s1_returned_flag", 0xFFFFFFFF, 5),
(store_mul, ":minus_flag_team_no", ":flag_team_no", -1),
(val_sub, ":minus_flag_team_no", 1),
#for only server itself
(call_script, "script_show_multiplayer_message", multiplayer_message_type_flag_returned_home, ":minus_flag_team_no"),
#no need to send also server here
(try_for_range, ":player_no", 1, multiplayer_player_loops_end),
(player_is_active, ":player_no"),
(multiplayer_send_2_int_to_player, ":player_no", multiplayer_event_show_multiplayer_message, multiplayer_message_type_flag_returned_home, ":minus_flag_team_no"),
(try_end),
(try_end),
(else_try),
(try_begin),
(eq, ":team_no", 0),
(assign, "$flag_1_at_ground_timer", 0),
(else_try),
(assign, "$flag_2_at_ground_timer", 0),
(try_end),
(try_end),
(try_end),
]),
(1, 0, 0, [(multiplayer_is_server),],
[
# Vincenzo begin
# Store team 1 spawns
(entry_point_get_position, pos62, multi_base_point_team_1), # pos62 is search pos.
(assign, ":entrypoints_team_1", 1),
(call_script, "script_multiplayer_server_hq_get_entrypoints_for_flag",-1,":entrypoints_team_1"),
(assign, ":entrypoints_team_1", reg0),
(assign, ":entrypoints_count_team_1", reg1),
# Store team 2 spawns
(entry_point_get_position, pos62, multi_base_point_team_2), # pos62 is search pos.
(assign, ":entrypoints_team_2", 101),
(call_script, "script_multiplayer_server_hq_get_entrypoints_for_flag",-1,":entrypoints_team_2"),
(assign, ":entrypoints_team_2", reg0),
(assign, ":entrypoints_count_team_2", reg1),
# Vincenzo end
(try_for_range, ":player_no", "$g_player_loops_begin", multiplayer_player_loops_end),
(player_is_active, ":player_no"),
(neg|player_is_busy_with_menus, ":player_no"),
(player_get_team_no, ":player_team", ":player_no"), #if player is currently spectator do not spawn his agent
(lt, | |
<reponame>arturs-berzins/adaptmesh
import warnings
from itertools import chain
from math import ceil, cos, hypot, pi, sin, sqrt
from operator import itemgetter
from random import randint, random, seed, shuffle
seed(0)
try:
    from geompreds import incircle, orient2d
except ImportError:
    warnings.warn(
        "Robust predicates not available, falling back on non-robust implementation"
    )

    def orient2d(pa, pb, pc):
        """Sign of the turn pa -> pb -> pc.

        Positive for a left (counter-clockwise) turn, zero for collinear
        points, negative for a right (clockwise) turn.  The magnitude is
        twice the signed area of triangle (pa, pb, pc).
        """
        left_part = (pa[0] - pc[0]) * (pb[1] - pc[1])
        right_part = (pa[1] - pc[1]) * (pb[0] - pc[0])
        return left_part - right_part

    def incircle(pa, pb, pc, pd):
        """Tests whether pd is in circle defined by the 3 points pa, pb and pc"""
        # translate so that pd becomes the origin
        a_dx = pa[0] - pd[0]
        a_dy = pa[1] - pd[1]
        b_dx = pb[0] - pd[0]
        b_dy = pb[1] - pd[1]
        c_dx = pc[0] - pd[0]
        c_dy = pc[1] - pd[1]
        # squared distances of the translated points from the origin ("lift")
        a_lift = a_dx * a_dx + a_dy * a_dy
        b_lift = b_dx * b_dx + b_dy * b_dy
        c_lift = c_dx * c_dx + c_dy * c_dy
        # 3x3 determinant expanded along the lift column
        return (
            a_lift * (b_dx * c_dy - c_dx * b_dy)
            + b_lift * (c_dx * a_dy - a_dx * c_dy)
            + c_lift * (a_dx * b_dy - b_dx * a_dy)
        )
class FiniteEdgeIterator(object):
    """Iterator over the edges of the finite triangles of a triangulation.

    Yields each edge exactly once as an ``Edge(triangle, side)``: interior
    edges are emitted from the incident triangle with the lowest ``id()``,
    convex-hull edges from their single finite triangle.  With
    ``constraints_only=True`` only constrained edges are yielded.
    """

    def __init__(self, triangulation, constraints_only=False):
        self.triangulation = triangulation
        self.constraints_only = constraints_only
        self.current_idx = 0  # index into the triangle list
        self.pos = -1  # side index (0..2) within the current triangle

    def __iter__(self):
        return self

    def __next__(self):
        ret = None
        # NOTE(review): the triangle list is re-materialized on every call,
        # making a full iteration O(n^2); presumably this guards against
        # mutation during iteration -- confirm before optimizing.
        trilist = list(self.triangulation.triangles)
        while self.current_idx < len(trilist):
            triangle = trilist[self.current_idx]
            # skip this triangle if it is an infinite triangle
            if not triangle.is_finite:
                self.pos = -1
                self.current_idx += 1
                continue
            self.pos += 1
            neighbour = triangle.neighbours[self.pos]
            # output edges only once:
            # an interior edge is output only from the incident triangle with
            # the lowest id; along the convex hull the edge is always output
            if id(triangle) < id(neighbour) or not neighbour.is_finite:
                if self.constraints_only and triangle.constrained[self.pos]:
                    ret = Edge(triangle, self.pos)
                elif not self.constraints_only:
                    ret = Edge(triangle, self.pos)
            if self.pos == 2:
                # all three sides of this triangle handled; move to the next
                self.pos = -1
                self.current_idx += 1
            if ret is not None:
                return ret
        else:
            raise StopIteration()
class TriangleIterator(object):
    """Iterator over all triangles that are in the triangle data structure.
    The finite_only parameter determines whether only the triangles in the
    convex hull of the point set are iterated over, or whether also infinite
    triangles are considered.
    """

    def __init__(self, triangulation, finite_only=False):
        self.triangulation = triangulation
        self.finite_only = finite_only
        self.visited = set()  # ids of triangles already seen by the walk
        self.to_visit_stack = [self.triangulation.external]  # DFS frontier

    def __iter__(self):
        return self

    def __next__(self):
        # Depth-first walk over the neighbour graph starting at the
        # external triangle; emits each triangle at most once.
        ret = None
        while self.to_visit_stack:
            triangle = self.to_visit_stack.pop()
            # determine whether we should 'emit' the triangle
            if (
                self.finite_only
                and id(triangle) not in self.visited
                and triangle.is_finite
            ):
                ret = triangle
            elif not self.finite_only and id(triangle) not in self.visited:
                ret = triangle
            self.visited.add(id(triangle))
            # NOTE: from an external triangle we can get
            # to a triangle in the triangulation multiple times
            for i in range(3):
                neighbour = triangle.neighbours[i]
                if neighbour is None:
                    continue
                elif id(neighbour) not in self.visited:
                    self.to_visit_stack.append(neighbour)
            if ret is not None:
                return ret
        else:
            raise StopIteration()
class ConvexHullTriangleIterator(TriangleIterator):
    """Iterator over all triangles that are in the convex hull of the
    point set (excludes infinite triangles).
    """

    def __init__(self, triangulation):
        # Delegate to TriangleIterator, forcing finite_only=True.
        super().__init__(triangulation, True)
class InteriorTriangleIterator(object):
    """Iterator over all triangles that are enclosed by constraints
    Assumes that a polygon has been triangulated which is closed properly
    and that the polygon consists of *exactly one* connected component!
    """

    def __init__(self, triangulation):
        constrained = False
        self.triangulation = triangulation
        self.visited = set([id(self.triangulation.external)])
        self.to_visit_stack = [self.triangulation.external.neighbours[2]]
        # walk to an interior triangle: flood outward from the exterior until
        # a constrained edge (the polygon boundary) is crossed
        while not constrained and self.to_visit_stack:
            triangle = self.to_visit_stack.pop()
            assert triangle is not None
            self.visited.add(id(triangle))
            # NOTE: from an external triangle we can get
            # to a triangle in the triangulation multiple times
            for i in range(3):
                constrained = triangle.constrained[i]
                neighbour = triangle.neighbours[i]
                if constrained:
                    # boundary found: restart the traversal from the triangle
                    # on the interior side, with a fresh visited set
                    self.to_visit_stack = [neighbour]
                    self.visited = set()
                    break
                if neighbour is not None and id(neighbour) not in self.visited:
                    self.to_visit_stack.append(neighbour)

    def __iter__(self):
        return self

    def __next__(self):
        # Flood fill inside the polygon; constrained edges are never crossed,
        # so only interior triangles are emitted.
        ret = None
        constrained = False
        while self.to_visit_stack:
            triangle = self.to_visit_stack.pop()
            if id(triangle) not in self.visited:
                ret = triangle
                self.visited.add(id(triangle))
                # NOTE: from an external triangle we can get
                # to a triangle in the triangulation multiple times
                for i in range(3):
                    constrained = triangle.constrained[i]
                    if constrained:
                        # do not cross the polygon boundary
                        continue
                    neighbour = triangle.neighbours[i]
                    if id(neighbour) not in self.visited:
                        self.to_visit_stack.append(neighbour)
            if ret is not None:
                return ret
        else:
            raise StopIteration()
class RegionatedTriangleIterator(object):
    """Iterator over all triangles that are fenced off by constraints.
    The constraints fencing off triangles determine the regions.
    The iterator yields a tuple: (region number, depth, triangle).
    Note:
    - The region number can increase in unexpected ways, e.g. 0, 1, 476, 1440,
      ..., etc.
    - The depth gives the nesting of the regions.
    The first group is always the infinite part (at depth 0) of the domain
    around the feature (the parts of the convex hull not belonging to any
    interior part).
    """

    def __init__(self, triangulation):
        # start at the exterior
        self.triangulation = triangulation
        # BUGFIX: membership bookkeeping now uses triangle objects
        # consistently.  The original seeded the set with
        # id(triangulation.external) and later tested `id(t) not in visited`
        # while storing *objects*, so those id() checks never matched and
        # the external triangle / `later` entries could be re-scheduled.
        self.visited = set([self.triangulation.external])
        self.to_visit_stack = [(self.triangulation.external.neighbours[2], 0)]
        self.later = []  # (triangle, depth) pairs behind a constrained edge
        self.group = 0  # current region number

    def __iter__(self):
        return self

    def __next__(self):
        while self.to_visit_stack or self.later:
            # visit all triangles in the exterior, subsequently visit
            # all triangles that are enclosed by a set of segments
            while self.to_visit_stack:
                triangle, depth = self.to_visit_stack.pop()
                assert triangle is not None
                if triangle in self.visited:
                    continue
                self.visited.add(triangle)
                for i in range(3):
                    constrained = triangle.constrained[i]
                    neighbour = triangle.neighbours[i]
                    if constrained and neighbour not in self.visited:
                        # crossing a constraint: belongs to a deeper region
                        self.later.append((neighbour, depth + 1))
                    elif (
                        neighbour is not None and neighbour not in self.visited
                    ):
                        self.to_visit_stack.append((neighbour, depth))
                return (self.group, depth, triangle)
            # flip the next level with this
            if self.later:
                self.group += 1
                for _ in range(len(self.later)):
                    t, d = self.later.pop()
                    if t not in self.visited:
                        self.to_visit_stack = [(t, d)]
                        break
        else:
            raise StopIteration()
class StarEdgeIterator(object):
    """Returns iterator over edges in the star of the vertex
    The edges are returned in counterclockwise order around the vertex.
    The triangles that the edges are associated with share the vertex
    that this iterator is constructed with.
    """

    def __init__(self, vertex):
        self.vertex = vertex
        self.start = vertex.triangle  # some triangle incident to the vertex
        self.triangle = self.start
        # side to cross to reach the next (ccw) triangle around the vertex
        self.side = ccw(self.start.vertices.index(self.vertex))
        self.done = False

    def __iter__(self):
        return self

    def __next__(self):
        if not self.done:
            # rotate: step into the neighbouring triangle across the stored side
            self.triangle = self.triangle.neighbours[self.side]
            assert self.triangle is not None
            side = self.triangle.vertices.index(self.vertex)
            assert self.triangle.vertices[side] is self.vertex
            e = Edge(self.triangle, side)
            self.side = ccw(side)
            if self.triangle is self.start:
                self.done = True  # completed a full turn; emit this last edge
            return e
        else:  # we are at start again
            raise StopIteration()
class DuplicatePointsFoundError(Exception):
    """Raised when the input point set contains coincident (duplicate) points."""

    pass
class TopologyViolationError(Exception):
    """Raised when an operation would produce an invalid triangulation topology."""

    pass
def box(points):
    """Obtain a tight fitting axis-aligned box around point set.

    Returns ((xmin, ymin), (xmax, ymax)).  Raises ValueError on an empty
    point set, like the original min/max-based implementation.
    """
    xs = [p[0] for p in points]
    ys = [p[1] for p in points]
    return (min(xs), min(ys)), (max(xs), max(ys))
def ccw(i):
    """Get index (0, 1 or 2) increased with one (counter-clockwise step)."""
    return (i + 1) % 3
def cw(i):
    """Get index (0, 1 or 2) decreased with one (clockwise step)."""
    # (i + 2) % 3 == (i - 1) % 3 for all ints; avoids a negative intermediate
    return (i + 2) % 3
def apex(side):
    """Given a side, give the apex (opposite vertex index) of the triangle"""
    return side % 3
def orig(side):
    """Given a side, give the origin (first vertex index) of that side"""
    return (side + 1) % 3  # ccw(side)
def dest(side):
    """Given a side, give the destination (second vertex index) of that side."""
    # equivalent to cw(side): (side + 2) % 3 == (side - 1) % 3 for all ints
    return (side + 2) % 3
def output_vertices(V, fh):
    """Output list of vertices as WKT to text file (for QGIS).

    Writes a semicolon-separated header followed by one row per vertex:
    its id(), a POINT WKT built from str(vertex), the is_finite flag and
    the info payload.
    """
    fh.write("id;wkt;finite;info\n")
    for v in V:
        fh.write(f"{id(v)};POINT({v});{v.is_finite};{v.info}\n")
def output_triangles(T, fh):
"""Output list | |
<filename>_v5_proc_txt2img.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# COPYRIGHT (C) 2014-2020 <NAME>.
# This software is released under the MIT License.
# https://github.com/konsan1101
# Thank you for keeping the rules.
import sys
import os
import time
import datetime
import codecs
import glob
import queue
import threading
import subprocess
import numpy as np
import cv2
from PIL import Image, ImageDraw, ImageFont
# 共通ルーチン
import _v5__qRiKi
qRiKi = _v5__qRiKi.qRiKi_class()
import _v5__qFunc
qFunc = _v5__qFunc.qFunc_class()
import _v5__qLog
qLog = _v5__qLog.qLog_class()
# --- Shared runtime configuration, resolved once from the qRiKi registry ---
# Platform / host identification
qPLATFORM       = qRiKi.getValue('qPLATFORM'        )
qRUNATTR        = qRiKi.getValue('qRUNATTR'         )
qHOSTNAME       = qRiKi.getValue('qHOSTNAME'        )
qUSERNAME       = qRiKi.getValue('qUSERNAME'        )
# Filesystem paths (media, work areas, per-subsystem control/exchange dirs)
qPath_pictures  = qRiKi.getValue('qPath_pictures'   )
qPath_videos    = qRiKi.getValue('qPath_videos'     )
qPath_cache     = qRiKi.getValue('qPath_cache'      )
qPath_sounds    = qRiKi.getValue('qPath_sounds'     )
qPath_icons     = qRiKi.getValue('qPath_icons'      )
qPath_fonts     = qRiKi.getValue('qPath_fonts'      )
qPath_log       = qRiKi.getValue('qPath_log'        )
qPath_work      = qRiKi.getValue('qPath_work'       )
qPath_rec       = qRiKi.getValue('qPath_rec'        )
qPath_s_ctrl    = qRiKi.getValue('qPath_s_ctrl'     )
qPath_s_inp     = qRiKi.getValue('qPath_s_inp'      )
qPath_s_wav     = qRiKi.getValue('qPath_s_wav'      )
qPath_s_jul     = qRiKi.getValue('qPath_s_jul'      )
qPath_s_STT     = qRiKi.getValue('qPath_s_STT'      )
qPath_s_TTS     = qRiKi.getValue('qPath_s_TTS'      )
qPath_s_TRA     = qRiKi.getValue('qPath_s_TRA'      )
qPath_s_play    = qRiKi.getValue('qPath_s_play'     )
qPath_v_ctrl    = qRiKi.getValue('qPath_v_ctrl'     )
qPath_v_inp     = qRiKi.getValue('qPath_v_inp'      )
qPath_v_jpg     = qRiKi.getValue('qPath_v_jpg'      )
qPath_v_detect  = qRiKi.getValue('qPath_v_detect'   )
qPath_v_cv      = qRiKi.getValue('qPath_v_cv'       )
qPath_v_photo   = qRiKi.getValue('qPath_v_photo'    )
qPath_v_msg     = qRiKi.getValue('qPath_v_msg'      )
qPath_d_ctrl    = qRiKi.getValue('qPath_d_ctrl'     )
qPath_d_play    = qRiKi.getValue('qPath_d_play'     )
qPath_d_prtscn  = qRiKi.getValue('qPath_d_prtscn'   )
qPath_d_movie   = qRiKi.getValue('qPath_d_movie'    )
qPath_d_upload  = qRiKi.getValue('qPath_d_upload'   )
# Busy flags (device- and subsystem-level mutual exclusion markers)
qBusy_dev_cpu   = qRiKi.getValue('qBusy_dev_cpu'    )
qBusy_dev_com   = qRiKi.getValue('qBusy_dev_com'    )
qBusy_dev_mic   = qRiKi.getValue('qBusy_dev_mic'    )
qBusy_dev_spk   = qRiKi.getValue('qBusy_dev_spk'    )
qBusy_dev_cam   = qRiKi.getValue('qBusy_dev_cam'    )
qBusy_dev_dsp   = qRiKi.getValue('qBusy_dev_dsp'    )
qBusy_dev_scn   = qRiKi.getValue('qBusy_dev_scn'    )
qBusy_s_ctrl    = qRiKi.getValue('qBusy_s_ctrl'     )
qBusy_s_inp     = qRiKi.getValue('qBusy_s_inp'      )
qBusy_s_wav     = qRiKi.getValue('qBusy_s_wav'      )
qBusy_s_STT     = qRiKi.getValue('qBusy_s_STT'      )
qBusy_s_TTS     = qRiKi.getValue('qBusy_s_TTS'      )
qBusy_s_TRA     = qRiKi.getValue('qBusy_s_TRA'      )
qBusy_s_play    = qRiKi.getValue('qBusy_s_play'     )
qBusy_v_ctrl    = qRiKi.getValue('qBusy_v_ctrl'     )
qBusy_v_inp     = qRiKi.getValue('qBusy_v_inp'      )
qBusy_v_QR      = qRiKi.getValue('qBusy_v_QR'       )
qBusy_v_jpg     = qRiKi.getValue('qBusy_v_jpg'      )
qBusy_v_CV      = qRiKi.getValue('qBusy_v_CV'       )
qBusy_d_ctrl    = qRiKi.getValue('qBusy_d_ctrl'     )
qBusy_d_inp     = qRiKi.getValue('qBusy_d_inp'      )
qBusy_d_QR      = qRiKi.getValue('qBusy_d_QR'       )
qBusy_d_rec     = qRiKi.getValue('qBusy_d_rec'      )
qBusy_d_telework = qRiKi.getValue('qBusy_d_telework' )
qBusy_d_play    = qRiKi.getValue('qBusy_d_play'     )
qBusy_d_browser = qRiKi.getValue('qBusy_d_browser'  )
qBusy_d_upload  = qRiKi.getValue('qBusy_d_upload'   )
# Ready flags (feature toggles signalled by other processes)
qRdy__s_force   = qRiKi.getValue('qRdy__s_force'    )
qRdy__s_fproc   = qRiKi.getValue('qRdy__s_fproc'    )
qRdy__s_sendkey = qRiKi.getValue('qRdy__s_sendkey'  )
qRdy__v_mirror  = qRiKi.getValue('qRdy__v_mirror'   )
qRdy__v_reader  = qRiKi.getValue('qRdy__v_reader'   )
qRdy__v_sendkey = qRiKi.getValue('qRdy__v_sendkey'  )
qRdy__d_reader  = qRiKi.getValue('qRdy__d_reader'   )
qRdy__d_sendkey = qRiKi.getValue('qRdy__d_sendkey'  )
# Fonts ('offset' is a per-font vertical drawing offset in pixels)
# NOTE(review): qFont_zh / qFont_ko use hard-coded Windows font paths;
# these will not exist on non-Windows hosts -- confirm intended platforms.
qFont_default   = {'file':qPath_fonts + '_vision_font_ipaexg.ttf','offset':8}
qFont_status    = {'file':qPath_fonts + '_vision_font_ipag.ttf','offset':8}
qFont_zh        = {'file':'C:/Windows/Fonts/msyh.ttc', 'offset':5}
qFont_ko        = {'file':'C:/Windows/Fonts/batang.ttc', 'offset':10}
class proc_txt2img:
    """
    Worker thread that renders text strings into images.

    Requests are submitted via put() as [name, value] pairs and rendered
    images come back from get()/checkGet() as [name, numpy-array] pairs.
    Supported request names (matched case-insensitively): '[txts]',
    '[status]', '[message_txts]', plus the control keys '_status_',
    '_flag_background_' and '_flag_blackwhite_'.
    Run/ready/busy states are mirrored to flag files under qPath_work.
    """

    def __init__(self, name='thread', id='0', runMode='debug', drawWidth='0', ):
        """
        Prepare the worker; the thread itself is started by begin().

        name      -- base name used to build proc_id and flag-file names.
        id        -- instance id appended to proc_id (note: this parameter
                     shadows the builtin id() inside this method).
        runMode   -- 'debug' enables log display.
        drawWidth -- fixed canvas width as a string; '0' means auto-size.
        """
        self.runMode = runMode
        self.drawWidth = drawWidth
        # Event used to request the worker loop to stop.
        self.breakFlag = threading.Event()
        self.breakFlag.clear()
        self.name = name
        self.id = id
        # proc_id: name padded to 10 chars with '_', last 2 chars replaced
        # by '_<id>' so each instance gets a unique, fixed-width id.
        self.proc_id = '{0:10s}'.format(name).replace(' ', '_')
        self.proc_id = self.proc_id[:-2] + '_' + str(id)
        if (runMode == 'debug'):
            self.logDisp = True
        else:
            self.logDisp = False
        qLog.log('info', self.proc_id, 'init', display=self.logDisp, )
        # Queues and thread handles; created for real in begin().
        self.proc_s = None
        self.proc_r = None
        self.proc_main = None
        self.proc_beat = None
        self.proc_last = None
        self.proc_step = '0'
        self.proc_seq = 0
        # Variable settings (render flags, changeable at runtime via queue)
        self.flag_background = 'on'
        self.flag_blackwhite = 'black'

    def __del__(self, ):
        qLog.log('info', self.proc_id, 'bye!', display=self.logDisp, )

    def begin(self, ):
        """Create the flag files and queues and start the worker thread."""
        #qLog.log('info', self.proc_id, 'start')
        self.fileRun = qPath_work + self.proc_id + '.run'
        self.fileRdy = qPath_work + self.proc_id + '.rdy'
        self.fileBsy = qPath_work + self.proc_id + '.bsy'
        qFunc.statusSet(self.fileRun, False)
        qFunc.statusSet(self.fileRdy, False)
        qFunc.statusSet(self.fileBsy, False)
        self.proc_s = queue.Queue()
        self.proc_r = queue.Queue()
        self.proc_main = threading.Thread(target=self.main_proc, args=(self.proc_s, self.proc_r, ))
        self.proc_beat = time.time()
        self.proc_last = time.time()
        self.proc_step = '0'
        self.proc_seq = 0
        # NOTE(review): setDaemon() is a deprecated alias of 'daemon = True'.
        self.proc_main.setDaemon(True)
        self.proc_main.start()

    def abort(self, waitMax=5, ):
        """Request the worker to stop; wait up to waitMax seconds per phase
        (first for the heartbeat to clear, then for the run file to vanish)."""
        qLog.log('info', self.proc_id, 'stop', display=self.logDisp, )
        self.breakFlag.set()
        chktime = time.time()
        while (not self.proc_beat is None) and ((time.time() - chktime) < waitMax):
            time.sleep(0.25)
        chktime = time.time()
        while (os.path.exists(self.fileRun)) and ((time.time() - chktime) < waitMax):
            time.sleep(0.25)

    def put(self, data, ):
        """Queue one request ([name, value]) for the worker. Always returns True."""
        self.proc_s.put(data)
        return True

    def checkGet(self, waitMax=5, ):
        """Wait up to waitMax seconds for a result to appear, then return get()."""
        chktime = time.time()
        while (self.proc_r.qsize() == 0) and ((time.time() - chktime) < waitMax):
            time.sleep(0.10)
        data = self.get()
        return data

    def get(self, ):
        """Return the next result [name, value], or ['', ''] if none is ready."""
        if (self.proc_r.qsize() == 0):
            return ['', '']
        data = self.proc_r.get()
        self.proc_r.task_done()
        return data

    def main_proc(self, cn_r, cn_s, ):
        """
        Worker loop: read requests from cn_r, render text images, put the
        results on cn_s.

        cn_r -- request queue (self.proc_s from the caller's point of view).
        cn_s -- result queue (self.proc_r from the caller's point of view).
        """
        # Logging
        qLog.log('info', self.proc_id, 'start', display=self.logDisp, )
        qFunc.statusSet(self.fileRun, True)
        self.proc_beat = time.time()
        # Initial setup
        self.proc_step = '1'
        # Fonts (zh/ko fall back to the default font when not installed)
        font16_default = ImageFont.truetype(qFont_default['file'], 16, encoding='unic')
        font16_defaulty = qFont_default['offset']
        font16_status = ImageFont.truetype(qFont_status[ 'file'], 16, encoding='unic')
        font16_statusy = qFont_status[ 'offset']
        font32_default = ImageFont.truetype(qFont_default['file'], 32, encoding='unic')
        font32_defaulty = qFont_default['offset']
        font48_default = ImageFont.truetype(qFont_default['file'], 48, encoding='unic')
        font48_defaulty = qFont_default['offset']
        if (os.path.exists(qFont_zh['file'])):
            font32_zh = ImageFont.truetype(qFont_zh['file'] , 32, encoding='unic')
            font32_zhy = qFont_zh['offset']
        else:
            font32_zh = ImageFont.truetype(qFont_default['file'], 32, encoding='unic')
            font32_zhy = qFont_default['offset']
        if (os.path.exists(qFont_ko['file'])):
            font32_ko = ImageFont.truetype(qFont_ko['file'] , 32, encoding='unic')
            font32_koy = qFont_ko['offset']
        else:
            font32_ko = ImageFont.truetype(qFont_default['file'], 32, encoding='unic')
            font32_koy = qFont_default['offset']
        # Wait loop
        self.proc_step = '5'
        while (self.proc_step == '5'):
            self.proc_beat = time.time()
            # Check for a stop request
            if (self.breakFlag.is_set()):
                self.breakFlag.clear()
                self.proc_step = '9'
                break
            # Fetch one request from the queue (if any)
            if (cn_r.qsize() > 0):
                cn_r_get = cn_r.get()
                inp_name = cn_r_get[0]
                inp_value = cn_r_get[1]
                cn_r.task_done()
            else:
                inp_name = ''
                inp_value = ''
            if (cn_r.qsize() > 1) or (cn_s.qsize() > 20):
                qLog.log('warning', self.proc_id, 'queue overflow warning!, ' + str(cn_r.qsize()) + ', ' + str(cn_s.qsize()))
            # Set the ready flag
            if (qFunc.statusCheck(self.fileRdy) == False):
                qFunc.statusSet(self.fileRdy, True)
            # Status response
            if (inp_name.lower() == '_status_'):
                out_name = inp_name
                out_value = '_ready_'
                cn_s.put([out_name, out_value])
            # Display-related flags
            elif (inp_name.lower() == '_flag_background_'):
                self.flag_background = inp_value
            elif (inp_name.lower() == '_flag_blackwhite_'):
                self.flag_blackwhite = inp_value
            # Rendering requests
            elif (inp_name.lower() == '[txts]') \
            or (inp_name.lower() == '[status]') \
            or (inp_name.lower() == '[message_txts]'):
                # Execution counter
                self.proc_last = time.time()
                self.proc_seq += 1
                if (self.proc_seq > 9999):
                    self.proc_seq = 1
                # Set the busy flag
                if (qFunc.statusCheck(self.fileBsy) == False):
                    qFunc.statusSet(self.fileBsy, True)
                # Measure the longest line. Strings may carry a 2-letter
                # language prefix like 'ja,...'; Japanese/Chinese/Korean
                # text counts as double width.
                texts = inp_value
                maxlen = 0
                for i in range(0, len(texts)):
                    if (texts[i][2:3] != ','):
                        if (qFunc.in_japanese(texts[i]) == True):
                            lenstr = len(texts[i]) * 2
                        else:
                            lenstr = len(texts[i])
                    else:
                        if (texts[i][:3] == 'ja,') \
                        or (texts[i][:3] == 'zh,') \
                        or (texts[i][:3] == 'ko,'):
                            lenstr = len(texts[i]) * 2
                        else:
                            lenstr = len(texts[i])
                    if (maxlen < lenstr):
                        maxlen = lenstr
                # Create the drawing canvas (row height depends on request type)
                if (inp_name.lower() == '[status]'):
                    draw_width = int(self.drawWidth)
                    draw_height = int(10 + (16 + 10) * len(texts))
                    if (draw_width == 0):
                        draw_width = 180
                if (inp_name.lower() == '[txts]'):
                    draw_width = int(self.drawWidth)
                    draw_height = int(10 + (32 + 10) * len(texts))
                    if (draw_width == 0):
                        draw_width = int(50 + 16 * maxlen)
                if (inp_name.lower() == '[message_txts]'):
                    draw_width = int(self.drawWidth)
                    draw_height = int(10 + (48 + 10) * len(texts))
                    if (draw_width == 0):
                        draw_width = int(100 + 24 * maxlen)
                # Background: white canvas unless flag_blackwhite == 'white'
                # (in which case the canvas is black and the text white).
                if (self.flag_blackwhite != 'white'):
                    text_img = Image.new('RGB', (draw_width, draw_height), (255,255,255))
                else:
                    text_img = Image.new('RGB', (draw_width, draw_height), ( 0, 0, 0))
                text_draw = ImageDraw.Draw(text_img)
                # Draw the text lines
                for i in range(0, len(texts)):
                    if (self.flag_blackwhite != 'white'):
                        txt_color = ( 0, 0, 0)
                    else:
                        txt_color = (255,255,255)
                    if (inp_name.lower() == '[status]'):
                        # Highlight status rows by keyword:
                        # magenta = alarm words, yellow = active, green = ready.
                        if (texts[i].find('busy!' )>=0) \
                        or (texts[i].find('slow!' )>=0) \
                        or (texts[i].find('disable!')>=0) \
                        or (texts[i].find('rec!' )>=0):
                            text_draw.rectangle((0, 5 + (16 + 10)*i, draw_width, (16 + 10)*(i+1)-1), fill=(0xff, 0x00, 0xff))
                            txt_color = ( 0, 0, 0)
                        if (texts[i].find('active' )>=0):
                            text_draw.rectangle((0, 5 + (16 + 10)*i, draw_width, (16 + 10)*(i+1)-1), fill=(0xff, 0xff, 0x00))
                            txt_color = ( 0, 0, 0)
                        if (texts[i].find('ready' )>=0):
                            text_draw.rectangle((0, 5 + (16 + 10)*i, draw_width, (16 + 10)*(i+1)-1), fill=(0x00, 0xff, 0x00))
                            txt_color = ( 0, 0, 0)
                        text_draw.text((5, (16 + 10)*i + font16_statusy), texts[i], font=font16_status, fill=txt_color)
                    if (inp_name.lower() == '[txts]'):
                        # Pick the font matching the optional language prefix.
                        if (texts[i][2:3] != ','):
                            text_draw.text((16, (32 + 10)*i + font32_defaulty), texts[i], font=font32_default, fill=txt_color)
                        else:
                            if (texts[i][:3] == 'zh,'):
                                text_draw.text((16, (32 + 10)*i + font32_zhy), texts[i], font=font32_zh, fill=txt_color)
                            elif (texts[i][:3] == 'ko,'):
                                text_draw.text((16, (32 + 10)*i + font32_koy), texts[i], font=font32_ko, fill=txt_color)
                            else:
                                text_draw.text((16, (32 + 10)*i + font32_defaulty), texts[i], font=font32_default, fill=txt_color)
                    if (inp_name.lower() == '[message_txts]'):
                        text_draw.text((24, (48 + 10)*i + font48_defaulty), texts[i], font=font48_default, fill=txt_color)
                # Output the result as a numpy image
                if (inp_name.lower() == '[status]'):
                    out_name = '[status_img]'
                    out_value = np.asarray(text_img)
                    cn_s.put([out_name, out_value])
                if (inp_name.lower() == '[txts]'):
                    out_name = '[txts_img]'
                    out_value = np.asarray(text_img)
                    cn_s.put([out_name, out_value])
                if (inp_name.lower() == '[message_txts]'):
                    out_name = '[message_img]'
                    out_value = np.asarray(text_img)
                    cn_s.put([out_name, out_value])
                # Clear the busy flag
                if (cn_r.qsize() == 0):
                    qFunc.statusSet(self.fileBsy, False)
            # Idle (throttle harder while the CPU busy flag is set)
            slow = False
            if (qFunc.statusCheck(qBusy_dev_cpu) == True):
                slow = True
            if (slow == True):
                time.sleep(1.00)
            else:
                if (cn_r.qsize() == 0):
                    time.sleep(0.25)
                else:
                    time.sleep(0.05)
        # Shutdown
        if (True):
            # Clear the ready flag
            qFunc.statusSet(self.fileRdy, False)
            # Clear the busy flag
            qFunc.statusSet(self.fileBsy, False)
            # Drain the queues
            while (cn_r.qsize() > 0):
                cn_r_get = cn_r.get()
                cn_r.task_done()
            while (cn_s.qsize() > 0):
                cn_s_get = cn_s.get()
                cn_s.task_done()
        # Logging
        qLog.log('info', self.proc_id, 'end', display=self.logDisp, )
        qFunc.statusSet(self.fileRun, False)
        self.proc_beat = None
if __name__ == '__main__':
    # Common classes
    qRiKi.init()
    qFunc.init()
    # Logging
    nowTime = datetime.datetime.now()
    filename = qPath_log + nowTime.strftime('%Y%m%d.%H%M%S') + '.' + os.path.basename(__file__) + '.log'
    qLog.init(mode='logger', filename=filename, )
    # Settings
    cv2.namedWindow('Display', 1)
    cv2.moveWindow( 'Display', 0, 0)
    # Smoke test: render Japanese text and show each result for 5 seconds.
    txt2img_thread = proc_txt2img('txt2img', '0', )
    txt2img_thread.begin()
    txt2img_thread.put(['[txts]', [u'おはようございます']])
    resdata = txt2img_thread.checkGet()
    if (resdata[0] == '[txts_img]'):
        img = resdata[1].copy()
        cv2.namedWindow('Display', 1)
        cv2.imshow('Display', img )
        cv2.waitKey(1)
        time.sleep(5)
    # BUG FIX: the worker matches the key '_flag_blackwhite_' (see
    # proc_txt2img.main_proc); the bare 'flag_blackwhite' key used before
    # was silently ignored, so white-on-black mode was never exercised.
    txt2img_thread.put(['_flag_blackwhite_', 'white'])
    txt2img_thread.put(['[txts]', [u'こんにちは', u'はじめまして']])
    resdata = txt2img_thread.checkGet()
    if (resdata[0] == '[txts_img]'):
        img = resdata[1].copy()
        cv2.namedWindow('Display', 1)
        cv2.imshow('Display', img )
        cv2.waitKey(1)
        time.sleep(5)
| |
5000 } , ## configuration of `pdf.generate`
... { 'ncpus' : 2 } , ## configuration of `pdf.fitTo`
... { 'mean' : 0.0 , 'sigma' : 1.0 } ## parameters to use for generation
... )
Derived parameters can be also retrived via <code>more_vars</code> argument:
>>> ratio = lambda res,pdf : res.ratio('x','y')
>>> more_vars = { 'Ratio' : ratio }
>>> r, s = make_toys ( .... , more_vars = more_vars , ... )
- If `gen_fun` is not specified `generate_data` is used
- If `fit_fun` is not specified `make_fit` is used
- If `accept_fun` is not specified `accept_fit` is used
"""
from ostap.core.ostap_types import string_types, integer_types
assert isinstance ( nToys , integer_types ) and 0 < nToys,\
'Invalid "nToys" argument %s/%s' % ( nToys , type ( nToys ) )
assert gen_config and 'nEvents' in gen_config,\
'Number of events per toy must be specified via "gen_config" %s' % gen_config
## 1. generator function?
if gen_fun is None :
if not silent : logger.info ( "make_toys: use default ``generate_data'' function!")
gen_fun = generate_data
assert gen_fun and callable ( gen_fun ) , 'Invalid generator function!'
## 2. fitting function?
if fit_fun is None :
if not silent : logger.info ( "make_toys: use default ``make_fit'' function!")
fit_fun = make_fit
assert fit_fun and callable ( fit_fun ) , 'Invalid fit function!'
## 3. accept function?
if accept_fun is None :
if not silent : logger.info ( "make_toys: use default ``accept_fit'' function!")
accept_fun = accept_fit
assert accept_fun and callable ( accept_fun ) , 'Invalid accept function!'
if progress and not silent :
assert isinstance ( frequency , integer_types ) and 0 < frequency,\
"make_toys: invalid ``frequency'' parameter %s" % frequency
import ostap.fitting.roofit
import ostap.fitting.dataset
import ostap.fitting.variables
import ostap.fitting.roofitresult
import ostap.fitting.basic
params = pdf.params ()
varset = ROOT.RooArgSet()
if isinstance ( data , ROOT.RooAbsData ) : varset = data.varset()
else :
for v in data :
if isinstance ( v , ROOT.RooAbsArg ) :
varset.add ( v )
elif isinstance ( v , string_types ) and v in params :
varset.add ( params [ v ] )
else :
raise TypeError('Invalid variable %s/%s' % ( v , type ( v ) ) )
fix_pars = vars_transform ( params )
fix_init = vars_transform ( init_pars )
pdf.load_params ( params = fix_pars , silent = silent )
pdf.load_params ( params = fix_init , silent = silent )
## save all initial parameters (needed for the final statistics)
params = pdf.params ()
fix_all = vars_transform ( params )
fitcnf = {}
fitcnf.update ( fit_config )
if not 'silent' in fitcnf : fitcnf [ 'silent' ] = silent
from collections import defaultdict
results = defaultdict(list)
from ostap.core.core import SE, VE
fits = defaultdict ( SE ) ## fit statuses
covs = defaultdict ( SE ) ## covariance matrix quality
## run pseudoexperiments
from ostap.utils.progress_bar import progress_bar
for i in progress_bar ( range ( nToys ) , silent = not progress ) :
## 1. reset PDF parameters
pdf.load_params ( params = fix_pars , silent = silent )
pdf.load_params ( params = init_pars , silent = silent )
## 2. generate dataset!
## dataset = pdf.generate ( varset = varset , **gen_config )
dataset = gen_fun ( pdf , varset = varset , **gen_config )
if not silent : logger.info ( 'Generated dataset #%d\n%s' % ( i , dataset ) )
## 3. fit it!
r = fit_fun ( pdf , dataset , **fitcnf )
## fit status
fits [ r.status () ] += 1
## covariance matrix quality
covs [ r.covQual () ] += 1
## ok ?
if accept_fun ( r , pdf , dataset ) :
## 4. save results
rpf = r.params ( float_only = True )
for p in rpf :
results [ p ].append ( rpf [ p ][0] )
for v in more_vars :
func = more_vars[v]
results [ v ] .append ( func ( r , pdf ) )
results [ '#' ] .append ( len ( dataset ) )
results [ '#sumw' ] .append ( dataset.sumVar ( '1' ) )
dataset.clear()
del dataset
del r
if progress or not silent :
if 0 < frequency and 1 <= i and 0 == ( i + 1 ) % frequency :
stats = make_stats ( results , fits , covs )
print_stats ( stats , i + 1 , logger = logger )
## make a final statistics
stats = make_stats ( results , fits , covs )
if progress or not silent :
print_stats ( stats , nToys , logger = logger )
return results, stats
# =============================================================================
## make <code>nToys</code> pseudoexperiments
#
# Schematically:
# @code
# for toy in range ( nToys ) :
# ... dataset = gen_fun ( gen_pdf , ... , **gen_config )
# ... result = fit_fun ( fit_pdf , dataset , **fit_config )
# ... if not accept_fun ( result , fit_pdf , dataset ) : continue
# .... < collect statistics here >
# @endcode
#
# For each experiment
# - generate dataset using <code>pdf</code> with variables specified
# in <code>data</code> and configuration specified via<code>gen_config</code>
# for each generation the parameters of <code>pdf</code> are reset
# for their initial values and values from <code>init_pars</code>
# - fit generated dataset with <code>pdf</code> using configuration
# specified via <code>fit_config</code>
#
# @code
# gen_pdf = ... ## PDF to use to generate pseudoexperiments
# fit_pdf = ... ## PDF to use to fit pseudoexperiments
# results , stats = make_toys2(
# gen_pdf = gen_pdf , ## PDF to use to generate pseudoexperiments
# fit_pdf = fit_pdf , ## PDF to use to fit pseudoexperiments
# nToys = 1000 , ## number of pseudoexperiments
# data = [ 'mass' ] , ## variables in dataset
# gen_config = { 'nEvents' : 5000 } , ## configuration of <code>pdf.generate</code>
# fit_config = { 'ncpus' : 2 } , ## configuration of <code>pdf.fitTo</code>
# gen_pars = { 'mean' : 0.0 , 'sigma' : 1.0 } ## parameters to use for generation
# )
# @endcode
#
# Derived parameters can be also retrived via <code>more_vars</code> argument:
# @code
# ratio = lambda res,pdf : res.ratio('x','y')
# more_vars = { 'Ratio' : ratio }
# r, s = make_toys2 ( .... , more_vars = more_vars , ... )
# @code
#
# @param gen_pdf PDF to be used for generation
# @param fit_pdf PDF to be used for fitting
# @param nToys number of pseudoexperiments to generate
# @param data variable list of variables to be used for dataset generation
# @param gen_config configuration of <code>pdf.generate</code>
# @param fit_config configuration of <code>pdf.fitTo</code>
# @param gen_pars redefine these parameters for each pseudoexperiment
# @param fit_pars redefine these parameters for each pseudoexperiment
# @param more_vars calculate more variables form fit-result
# @param gen_fun specific generate action (if needed)
# @param fit_fun specific fitting action (if needed)
# @param accept_fun specific accept action (if needed)
# @param silent silent toys?
# @param progress show progress bar?
# @param logger logger
# @param frequency how often to dump the intermediate results ?
# @return dictionary with fit results for the toys and the dictionary of statistics
#
# - If <code>gen_fun</code> is not specified <code>generate_data</code> is used
# - If <code>fit_fun</code> is not specified <code>make_fit</code> is used
# - If <code>accept_fun</code> is not specified <code>accept_fit</code> is used
def make_toys2 ( gen_pdf , ## pdf to generate toys
fit_pdf , ## pdf to fit
nToys , ## number of pseudoexperiments
data , ## template for dataset/variables
gen_config , ## parameters for <code>pdf.generate</code>
fit_config = {} , ## parameters for <code>pdf.fitTo</code>
gen_pars = {} , ## gen-parameters to reset/use
fit_pars | |
at all. "
msg += "Marking this workflow as `done`."
self.logger.warning(msg, workflow['RequestName'])
self.docKeyUpdate(workflow, transferStatus='done')
return workflow
except Exception as ex:
msg = "Could not make transfer subscription for Workflow: {}".format(workflow['RequestName'])
msg += "Error: {}".format(ex)
self.logger.exception(msg)
return workflow
ddmStatusList = ['new', 'activated', 'completed', 'rejected', 'cancelled']
transferIDs = []
transferStatusList = []
for ddmResult in ddmResultList:
if 'data' in ddmResult.keys():
id = deepcopy(ddmResult['data'][0]['request_id'])
status = deepcopy(ddmResult['data'][0]['status'])
transferStatusList.append({'transferID': id,
'status': status})
transferIDs.append(id)
if transferStatusList and all(map(lambda x:
True if x['status'] in ddmStatusList else False,
transferStatusList)):
self.docKeyUpdate(workflow,
transferStatus='done',
transferIDs=transferIDs)
return workflow
else:
self.docKeyUpdate(workflow,
transferStatus='incomplete')
msg = "No data found in ddmResults for %s. Either dry run mode or " % workflow['RequestName']
msg += "broken transfer submission to DDM. "
msg += "ddmResults: \n%s" % pformat(ddmResultList)
self.logger.warning(msg)
return workflow
elif isinstance(workflow, (list, set, CommandCursor)):
ddmRequests = {}
for wflow in workflow:
wflowName = wflow['RequestName']
ddmRequests[wflowName] = DDMReqTemplate('copy',
item=wflow['OutputDatasets'],
n=wflow['numberOfCopies'],
site=wflow['destination'])
if self.msConfig['enableAggSubscr']:
# ddmResults = self.ddm.makeAggRequests(ddmRequests.values(), aggKey='item')
# TODO:
# Here to deal with the reverse mapping of DDM request_id to workflow
pass
else:
# for wflowName, ddmReq in ddmRequests.items():
# ddmResults.append(self.ddm.makeRequests(ddmReq))
# TODO:
# Here to deal with making request per workflow and
# reconstructing and returning the same type of object
# as the one that have been passed to the current call.
pass
# FIXME:
msg = "Not yet implemented mode with workflows of type %s!\n" % type(workflow)
msg += "Skipping this call"
self.logger.error(msg)
raise NotImplementedError
else:
msg = "Unsupported type %s for workflows!\n" % type(workflow)
msg += "Skipping this call"
self.logger.error(msg)
raise UnsupportedError
elif self.msConfig['defaultDataManSys'] == 'PhEDEx':
pass
elif self.msConfig['defaultDataManSys'] == 'Rucio':
if isinstance(workflow, MSOutputTemplate):
self.logger.info("Making transfer subscriptions for %s", workflow['RequestName'])
rucioResultList = []
if workflow['destinationOutputMap']:
for dMap in workflow['destinationOutputMap']:
try:
copies = workflow['numberOfCopies'] if workflow['numberOfCopies'] else 1
# NOTE:
# Once we get rid of DDM this rseExpression generation
# should go in the Producer thread
if workflow['isRelVal']:
if dMap['destination']:
rseUnion = '('+'|'.join(dMap['destination'])+')'
else:
# NOTE:
# If we get to here it is most probably because the destination
# in the destinationOutputMap is empty (due to an
# 'ALCARECO' dataset from a Relval workflow or similar).
# Since this is expected to happen a lot, we'd better just
# log a warning and continue
msg = "No destination provided. Avoid creating transfer subscription for "
msg += "Workflow: %s : Dataset Names: %s"
self.logger.warning(msg, workflow['RequestName'], dMap['datasets'])
continue
rseExpression = rseUnion + '&cms_type=real&rse_type=DISK'
# NOTE:
# The above rseExpression should resolve to something similar to:
# (T2_CH_CERN|T1_US_FNAL_Disk)&cms_type=real&rse_type=DISK
# where the first part is a Union of all destination sites and
# the second part is a general constraint for those to be real
# entries but not `Test` or `Temp` and we also target only sites
# marked as `Disk`
else:
rseExpression = '(tier=2|tier=1)&cms_type=real&rse_type=DISK'
# NOTE:
# The above rseExpression should target all T1_*_Disk and T2_*
# sites, where the first part is a Union of those Tiers and
# the second part is a general constraint for those to be real
# entries but not `Test` or `Temp` and we also target only sites
# marked as `Disk`
if self.msConfig['enableDataPlacement']:
rucioResultList.append(self.rucio.createReplicationRule(dMap['datasets'],
rseExpression,
copies=copies))
else:
msg = "DRY-RUN:: The effective Rucio submission would look like: \n"
msg += "account: %s \n"
msg += "dids: %s \n"
msg += "rseExpression: %s\n"
msg += "copies: %s\n"
self.logger.warning(msg,
self.msConfig['rucioAccount'],
pformat(dMap['datasets']),
rseExpression,
copies)
except Exception as ex:
msg = "Could not make transfer subscription for Workflow: %s\n:%s"
self.logger.exception(msg, workflow['RequestName'], str(ex))
return workflow
else:
# NOTE:
# Nothing else to be done here. We mark the document as
# done so we do not iterate through it multiple times
msg = "Skip submissions for %s. Either all data Tiers were "
msg += "excluded or there were no Output Datasets at all. "
msg += "Marking this workflow as `done`."
self.logger.warning(msg, workflow['RequestName'])
self.docKeyUpdate(workflow, transferStatus='done')
return workflow
transferIDs = rucioResultList
if self.msConfig['enableDataPlacement']:
self.docKeyUpdate(workflow,
transferStatus='done',
transferIDs=transferIDs)
return workflow
elif isinstance(workflow, (list, set, CommandCursor)):
# FIXME:
msg = "Not yet implemented mode with workflows of type %s!\n" % type(workflow)
msg += "Skipping this call"
self.logger.error(msg)
raise NotImplementedError
else:
msg = "Unsupported type %s for workflows!\n" % type(workflow)
msg += "Skipping this call"
self.logger.error(msg)
raise UnsupportedError
# NOTE:
# if we are about to implement this through a pipeline we MUST not
# return the result here but the WHOLE document with updated fields
# for the transfer as it will be passed to the next function in
# the pipeline and uploaded to MongoDB
return workflow
def getRequestRecords(self, reqStatus):
    """
    Queries ReqMgr2 for requests in a given status.
    NOTE: to be taken from MSTransferor with minor changes

    :param reqStatus: the ReqMgr2 status to query for
    :return: dict of request records (empty when nothing was returned)
    """
    # NOTE:
    # If we are about to use an additional database for book keeping like
    # MongoDB, we can fetch up to 'limitRequestsPerCycle' and keep track
    # their status.
    # The following is taken from MSMonitor, just for an example.
    # With detail=True the data-service returns a single-element list
    # whose first entry maps request names to their full records.
    result = self.reqmgr2.getRequestByStatus([reqStatus], detail=True)
    requests = result[0] if result else {}
    self.logger.info(' retrieved %s requests in status: %s', len(requests), reqStatus)
    return requests
def msOutputConsumer(self):
"""
A top level function to drive the creation and book keeping of all the
subscriptions to the Data Management System
"""
# DONE:
# Done: To check if the 'enableDataPlacement' flag is really taken into account
# Done: To make this for both relvals and non relvals
# Done: To return the result
# Done: To make report document
# Done: To build it through a pipe
# Done: To write back the updated document to MonogoDB
msPipelineRelVal = Pipeline(name="MSOutputConsumer PipelineRelVal",
funcLine=[Functor(self.docReadfromMongo,
self.msOutRelValColl,
setTaken=False),
Functor(self.makeSubscriptions),
Functor(self.docKeyUpdate,
isTaken=False,
isTakenBy=None,
lastUpdate=int(time())),
Functor(self.docUploader,
self.msOutRelValColl,
update=True,
keys=['isTaken',
'lastUpdate',
'transferStatus',
'transferIDs']),
Functor(self.docDump, pipeLine='PipelineRelVal'),
Functor(self.docCleaner)])
msPipelineNonRelVal = Pipeline(name="MSOutputConsumer PipelineNonRelVal",
funcLine=[Functor(self.docReadfromMongo,
self.msOutNonRelValColl,
setTaken=False),
Functor(self.makeSubscriptions),
Functor(self.docKeyUpdate,
isTaken=False,
isTakenBy=None,
lastUpdate=int(time())),
Functor(self.docUploader,
self.msOutNonRelValColl,
update=True,
keys=['isTaken',
'lastUpdate',
'transferStatus',
'transferIDs']),
Functor(self.docDump, pipeLine='PipelineNonRelVal'),
Functor(self.docCleaner)])
# NOTE:
# If we actually have any exception that has reached to the top level
# exception handlers (eg. here - outside the pipeLine), this means
# some function from within the pipeLine has not caught it and the msOutDoc
# has left the pipe and died before the relevant document in MongoDB
# has been released (its flag 'isTaken' to be set back to False)
wfCounters = {}
for pipeLine in [msPipelineRelVal, msPipelineNonRelVal]:
pipeLineName = pipeLine.getPipelineName()
wfCounters[pipeLineName] = 0
while wfCounters[pipeLineName] < self.msConfig['limitRequestsPerCycle']:
# take only workflows:
# - which are not already taken or
# - a transfer subscription have never been done for them and
# - avoid retrying workflows in the same cycle
# NOTE:
# Once we are running the service not in a dry run mode we may
# consider adding and $or condition in mQueryDict for transferStatus:
# '$or': [{'transferStatus': None},
# {'transferStatus': 'incomplete'}]
# So that we can collect also workflows with partially or fully
# unsuccessful transfers
currTime = int(time())
treshTime = currTime - self.msConfig['interval']
mQueryDict = {
'$and': [
{'isTaken': False},
{'$or': [
{'transferStatus': None},
{'transferStatus': 'incomplete'}]},
{'$or': [
{'lastUpdate': None},
{'lastUpdate': {'$lt': treshTime}}]}]}
# FIXME:
# To redefine those exceptions as MSoutputExceptions and
# start using those here so we do not mix with general errors
try:
pipeLine.run(mQueryDict)
except KeyError as ex:
msg = "%s Possibly malformed record in MongoDB. Err: %s. " % (pipeLineName, str(ex))
msg += "Continue to the next document."
self.logger.exception(msg)
continue
except TypeError as ex:
msg = "%s Possibly malformed record in MongoDB. Err: %s. " % (pipeLineName, str(ex))
msg += "Continue to the next document."
self.logger.exception(msg)
continue
except EmptyResultError as ex:
msg = "%s All relevant records in MongoDB exhausted. " % pipeLineName
msg += "We are done for the current cycle."
self.logger.info(msg)
break
except Exception as ex:
msg = "%s General Error from | |
"[DBG]: 2"
df2 = open("/tmp/orig_content", "w")
df2.write("---begin---\n")
df2.write(file_content)
df2.close()
# For debugging purposes write the file to tmp
df = open("/tmp/toprogram_dbg", "w")
try:
try:
# TODO: encode? utf8?
if isinstance(file_content, unicode):
if DEBUG: print "[DBG]: Encoding file content in utf8"
file_content_encoded = file_content.encode('utf8')
else:
if DEBUG: print "[DBG]: Not encoding file content"
file_content_encoded = file_content
file_content_recovered = ExperimentUtil.deserialize(file_content_encoded)
os.write(fd, file_content_recovered)
if DEBUG:
df.write(file_content_recovered)
finally:
os.close(fd)
self._programmer.program(file_name)
finally:
os.remove(file_name)
# print file_name
# import sys
# sys.stdout.flush()
except Exception as e:
if DEBUG:
tb = traceback.format_exc()
print "FULL EXCEPTION IS: {0}".format(tb)
# TODO: test me
log.log(UdXilinxExperiment, log.level.Info,
"Exception joining sending program to device: %s" % e.args[0])
log.log_exc(UdXilinxExperiment, log.level.Debug)
raise ExperimentErrors.SendingFileFailureError("Error sending file to device: %s" % e)
self._clear()
def _clear(self):
    """
    Reset the board inputs to a known state.

    Sends the CleanInputs command and force-resets the first five virtual
    switches. The cached switch state is kept as "0"/"1" strings because
    that is what change_switch() and do_send_command_to_device() compare
    against and store.

    Raises SendingCommandFailureError if a command cannot be delivered.
    """
    try:
        self._command_sender.send_command("CleanInputs")
        # BUG FIX: this used to be [0] * 10 (integers), while every reader
        # compares entries against the strings "0"/"1"; the int-vs-"0"
        # mismatch silently suppressed switch commands. Use strings.
        self._switches_state = ["0"] * 10
        for i in range(5):
            self.change_switch(i, False, True)
    except Exception as e:
        raise ExperimentErrors.SendingCommandFailureError(
            "Error sending command to device: %s" % e
        )
@Override(Experiment.Experiment)
@logged("info")
def do_dispose(self):
    """
    Tear the experiment down: wait for any in-flight board programming,
    drop the thread reference and stop the watertank auto-updater, in
    case the experiment finished early while they were still active.
    """
    # Reset the usage timer so a stale start time never leaks into a new session.
    self._use_time_start = None
    programming = self._programming_thread
    if programming is not None:
        programming.join()
    # Cleaning references
    self._programming_thread = None
    watertank = self._watertank
    if watertank is not None:
        # Stop the simulation loop in case it is still running.
        watertank.autoupdater_stop()
    return "ok"
@Override(Experiment.Experiment)
@logged("info")
def do_start_experiment(self, *args, **kwargs):
    """Reset the experiment state and report its initial configuration."""
    self._current_state = STATE_NOT_READY
    initial_cfg = """{ "webcam" : "%s", "expected_programming_time" : %s, "expected_synthesizing_time" : %s, "max_use_time" : %s }""" % (
        self.webcam_url, self._programmer_time, self._synthesizer_time, self._max_use_time)
    return json.dumps({"initial_configuration": initial_cfg, "batch": False})
def virtualworld_update(self, delta):
    """
    Handles virtual world updating. For instance, in the case of the
    watertank, it controls the virtual sensors (switches) depending on the
    watertank level, and periodically randomises the water demand.

    @param delta Seconds elapsed since the previous update.
    """
    # PEP 8: compare against None with 'is'/'is not', not '!='/'=='.
    if self._watertank is None:
        return
    waterLevel = self._watertank.get_water_level()
    # Virtual level sensors: switches 0-2 trip at 20%, 50% and 80% fill.
    self.change_switch(0, waterLevel >= 0.20)
    self.change_switch(1, waterLevel >= 0.50)
    self.change_switch(2, waterLevel >= 0.80)
    # These only apply for the temperature mode, but they are always valid nonetheless.
    temps = self._watertank.get_temperatures()
    self.change_switch(3, temps[0] > 200)  # The 200 threshold is essentially arbitrary
    self.change_switch(4, temps[1] > 200)  # The 200 threshold is essentially arbitrary
    # Randomise the simulated water demand every 5 seconds.
    self._watertank_time_without_demand_change += delta
    if self._watertank_time_without_demand_change > 5:
        self._watertank_time_without_demand_change = 0
        self._watertank.set_outputs([random.randint(0, 40)])
# TODO: Eventually, there should be some way to limit the number of switches that a
# user can explicitly control depending on the VirtualWorld simulation and state.
# For instance, if the first switch represents a water level sensor, it makes no
# sense for the user to be able to define its state. For now, it is left as-is
# mainly for debugging convenience.
def change_switch(self, switch, on, force_update = False):
    """
    Changes the state of a switch. This can be used, for instance, for
    simulating sensors.
    @param switch Number of the switch to change.
    @param on True if we wish to turn it on, false to turn it off.
    @param force_update Send the command even when the cached state
           already matches the requested one.
    """
    # Switches are addressed in reverse order on the device.
    idx = 9 - switch
    desired = "1" if on else "0"
    stale = "0" if on else "1"
    # Only talk to the device when the cached state disagrees (or on force).
    if self._switches_state[idx] == stale or force_update:
        self._command_sender.send_command("ChangeSwitch %s %d" % ("on" if on else "off", idx))
    self._switches_state[idx] = desired
@Override(Experiment.Experiment)
def do_should_finish(self):
if DEBUG:
print "[DBG]: We're on should_finish."
# Check here that we still have use time left. When the refactor takes place,
# this should maybe be moved somewhere else.
if self._max_use_time != 0 and self._use_time_start is not None:
elapsed = time.time() - self._use_time_start
if elapsed >= self._max_use_time:
# We are overtime. We should make the user finish.
self._current_state = STATE_USE_TIME_EXCEEDED
print "[DBG]: Time was indeed exceeded. Quitting now."
# TODO: Maybe we should give some extra seconds so that the state
# STATE_USE_TIME_EXCEEDED can be received normally.
return -1
return 10 # We still haven't exceeded our time. Check again in ten seconds.
@logged("info")
@Override(Experiment.Experiment)
@caller_check(ServerType.Laboratory)
def do_send_command_to_device(self, command):
    """
    Handle a command sent from the laboratory client.

    Commands answered locally: STATE, ChangeSwitch (intercepted for state
    tracking, then relayed), REPORT_USE_TIME_LEFT, REPORT_SWITCHES,
    VIRTUALWORLD_MODE, VIRTUALWORLD_STATE, HELP, SYNTHESIZING_RESULT and
    READ_LEDS. Anything else is relayed to the actual device handler.

    @param command Raw command string.
    @raise SendingCommandFailureError if handling/relaying the command fails.
    """
    try:
        # Reply with the current state of the experiment. Particularly, the clients
        # will need to know whether the programming has been done and whether we are
        # hence ready to start receiving real commands.
        if command == 'STATE':
            if self._fake:
                self._current_state = STATE_READY
            if DEBUG:
                print("[DBG]: STATE CHECK: " + self._current_state)
            reply = "STATE=" + self._current_state
            return reply
        elif command.startswith('ChangeSwitch'):
            # Intercept the ChangeSwitch command to track the state of the Switches.
            # This command will in fact be later relied to the Device.
            cs = command.split(" ")
            switch_number = cs[2]
            # TODO: Make sure that switches are being properly used,
            # and that reversion issues are taken into account.
            self._switches_state[int(switch_number)] = "1" if cs[1] == "on" else "0"
        elif command == 'REPORT_USE_TIME_LEFT':
            if self._max_use_time == 0:
                time_left = "unlimited"
            elif self._use_time_start is None:
                time_left = "unknown"
            else:
                elapsed = time.time() - self._use_time_start
                remaining = self._max_use_time - elapsed
                if remaining < 0:
                    remaining = 0
                time_left = str(remaining)
            if DEBUG:
                print("[DBG]: REPORT_USE_TIME_LEFT: Time Left: %s" % time_left)
            return time_left
        elif command == 'REPORT_SWITCHES':
            # TODO: Currently this returns a list. It is somewhat weird for this to return a list.
            # This should be fixed, after making sure it will not break anything.
            return self._switches_state
        elif command.startswith('VIRTUALWORLD_MODE'):
            vw = command.split(" ")[1]
            self._virtual_world = vw
            # Stop the watertank if it is running.
            if self._watertank is not None:
                self._watertank.autoupdater_stop()
            if vw == "watertank":
                self._watertank = watertank_simulation.Watertank(1000, [30, 30], [10], 0.5)
                self._last_virtualworld_update = time.time()
                self._watertank.autoupdater_start(1)
                return "ok"
            elif vw == "watertank_temperatures":
                self._virtual_world = "watertank"  # So that other parts of the code aren't confused. Not very tidy. TODO: Fixme.
                self._watertank = watertank_simulation.Watertank(1000, [30, 30], [10], 0.5, True)
                self._last_virtualworld_update = time.time()
                self._watertank.autoupdater_start(1)
                return "ok"
            else:
                return "unknown_virtualworld"
        elif command.startswith('VIRTUALWORLD_STATE'):
            if self._watertank is not None:
                self._virtual_world_state = self._watertank.get_json_state([30, 30], [40])
                now = time.time()
                # TODO: This should not be done here. For now however, it's the easiest place to put it in.
                self.virtualworld_update(now - self._last_virtualworld_update)
                self._last_virtualworld_update = now
                return self._virtual_world_state
            return "{}"
        elif command == 'HELP':
            return "VIRTUALWORLD_MODE | VIRTUALWORLD_STATE | SYNTHESIZING_RESULT | READ_LEDS | REPORT_SWITCHES | REPORT_USE_TIME_LEFT | STATE | ChangeSwitch"
        elif command == 'SYNTHESIZING_RESULT':
            if DEBUG:
                print("[DBG]: SYNTHESIZING_RESULT: " + self._compiling_result)
            return self._compiling_result
        elif command == 'READ_LEDS':
            try:
                self._led_state = self.query_leds_from_json()
                if DEBUG:
                    print("[DBG]: LED state queried. It is: {0}".format(self._led_state))
                if self._virtual_world == "watertank":
                    # Note: The following needs a somewhat major redesign.
                    self._update_watertank(self._led_state)
                return "".join(self._led_state)
            except Exception:
                traceback.print_exc()
                return "ERROR: " + traceback.format_exc()
        # Otherwise we assume that the command is intended for the actual device handler
        # If it isn't, it throw an exception itself.
        if self._switches_reversed:
            if command.startswith("ChangeSwitch"):
                # Reverse only the trailing switch number. The previous
                # str.replace-based version would have corrupted the command
                # had the digit also appeared elsewhere in the string.
                parts = command.split(" ")
                parts[-1] = str(9 - int(parts[-1]))
                command = " ".join(parts)
        self._command_sender.send_command(command)
    except Exception as e:
        if DEBUG:
            # BUGFIX: traceback.print_exc() takes (limit, file), not an
            # exception; passing ``e`` was interpreted as ``limit``.
            traceback.print_exc()
        raise ExperimentErrors.SendingCommandFailureError(
            "Error sending command to device: %s" % e
        )
def query_leds_from_json(self):
    """
    Query the LED state from the LEDs JSON service.

    The server reports the LEDs from left to right (leftmost LED being 0, topmost being 9)
    :return: list of LED values as strings. When the experiment (or its
        LEDs) is faked, a list of ten '0's is returned; otherwise only the
        first 8 reported inputs are returned (why only 8 is unclear —
        behavior kept for backwards compatibility).
    """
    if self._fake or self._fake_leds:
        return ['0']*10
    jsonurl = self._leds_service_url
    o = urllib2.urlopen(jsonurl)
    jsonstr = o.read()
    js = json.loads(jsonstr)
    # Index the reported inputs by their input number.
    # ('entry' avoids shadowing the builtin 'input', which the original did.)
    inputsMap = {}
    for entry in js["inputs"]:
        inputsMap[int(entry["inputNumber"])] = entry["value"]
    # We store only the first 8. (why?).
    return [inputsMap[i] for i in range(8)]
def _update_watertank(self, led_state):
"""
This function should probably be moved somewhere, and made generic. Ideally, we would want
watertank to be some kind of plugin.
"""
first_pump = led_state[7] == '1'
second_pump = led_state[6] == '1'
if first_pump:
first_pump = 30
else:
first_pump = 0
if second_pump:
second_pump = 30
else:
second_pump = 0
self._watertank.set_input(0, first_pump)
self._watertank.set_input(1, second_pump)
if __name__ == "__main__":
    # Ad-hoc manual test: build the experiment with a sample configuration
    # and exercise a handful of commands.
    from voodoo.configuration import ConfigurationManager
    from voodoo.sessions.session_id import SessionId
    cfg_manager = ConfigurationManager()
    try:
        cfg_manager.append_path("../../../launch/sample/main_machine/main_instance/experiment_fpga/server_config.py")
    except Exception:
        # Fall back to the path used when launched from the repository root.
        # (Narrowed from a bare ``except:`` so Ctrl-C is not swallowed.)
        cfg_manager.append_path("../launch/sample/main_machine/main_instance/experiment_fpga/server_config.py")
    experiment = UdXilinxExperiment(None, None, cfg_manager)
    lab_session_id = SessionId('my-session-id')
    experiment.do_start_experiment()
    experiment._max_use_time = 10
    # Single-argument print() calls are valid in both Python 2 and 3
    # (the original used Python-2-only print statements).
    print(experiment.do_send_command_to_device("REPORT_USE_TIME_LEFT"))
    print(experiment.do_send_command_to_device("STATE"))
    print(experiment.do_send_command_to_device("STATE"))
    print(experiment.do_should_finish())
    print(experiment.do_send_command_to_device("STATE"))
    print(experiment.do_should_finish())
    print(experiment.do_send_command_to_device("VIRTUALWORLD_STATE"))
    print(experiment.do_send_command_to_device("REPORT_SWITCHES"))
    print(experiment.do_send_command_to_device("ChangeSwitch on 1"))
    print(experiment.do_send_command_to_device("REPORT_SWITCHES"))
    print(experiment.do_send_command_to_device("VIRTUALWORLD_MODE watertank"))
    print(experiment.do_send_command_to_device("VIRTUALWORLD_STATE"))
    time.sleep(1)
    print(experiment.do_send_command_to_device("VIRTUALWORLD_STATE"))
    print(experiment.do_send_command_to_device("REPORT_SWITCHES"))
    time.sleep(1)
| |
'a'],
['lunghezza', 'noun', 'b'],
['lungo', 'adjective', 'a'],
['lungo', 'preposition', 'a'],
['lungo', 'noun', 'a'],
['luogo', 'noun', 'a'],
['lupo', 'noun', 'a'],
['lussemburghese', 'adjective', 'c'],
['lussemburghese', 'noun', 'c'],
['lusso', 'noun', 'b'],
['lutto', 'noun', 'b'],
['ma', 'conjunction', 'a'],
['ma', 'noun', 'a'],
['maccherone', 'noun', 'c'],
['macchia', 'noun', 'a'],
['macchina', 'noun', 'a'],
['macchinista', 'noun', 'c'],
['macedone', 'adjective', 'c'],
['macedone', 'noun', 'c'],
['macedonia', 'noun', 'c'],
['maceria', 'noun', 'b'],
['macinare', 'verb', 'c'],
['madonna', 'noun', 'b'],
['madonna', 'exclamation', 'b'],
['madre', 'noun', 'a'],
['madrileno', 'adjective', 'c'],
['madrileno', 'noun', 'c'],
['madrileno', 'adjective', 'c'],
['madrileno', 'noun', 'c'],
['madrina', 'noun', 'c'],
['maestra', 'noun', 'b'],
['maestranza', 'noun', 'c'],
['maestro', 'noun', 'a'],
['maestro', 'adjective', 'a'],
['mafia', 'noun', 'b'],
['mafioso', 'adjective', 'b'],
['mafioso', 'noun', 'b'],
['magari', 'exclamation', 'a'],
['magari', 'conjunction', 'a'],
['magari', 'adverb', 'a'],
['magazzino', 'noun', 'b'],
['maggio', 'noun', 'a'],
['maggioranza', 'noun', 'a'],
['maggiorenne', 'adjective', 'c'],
['maggiorenne', 'noun', 'c'],
['maggiormente', 'adverb', 'b'],
['magia', 'noun', 'b'],
['magico', 'adjective', 'a'],
['magistrato', 'noun', 'b'],
['magistratura', 'noun', 'b'],
['maglia', 'noun', 'a'],
['maglietta', 'noun', 'b'],
['magnetico', 'adjective', 'b'],
['magnifico', 'adjective', 'b'],
['mago', 'noun', 'b'],
['mago', 'adjective', 'b'],
['magro', 'adjective', 'b'],
['magro', 'noun', 'b'],
['mah', 'exclamation', 'b'],
['mai', 'adverb', 'a'],
['maiale', 'noun', 'b'],
['maionese', 'noun', 'c'],
['mais', 'noun', 'c'],
['maiuscola', 'noun', 'c'],
['malato', 'adjective', 'a'],
['malato', 'noun', 'a'],
['malattia', 'noun', 'a'],
['malaugurio', 'noun', 'c'],
['malavita', 'noun', 'c'],
['male', 'adverb', 'a'],
['male', 'exclamation', 'a'],
['male', 'noun', 'a'],
['maledetto', 'past_part', 'b'],
['maledetto', 'adjective', 'b'],
['maledetto', 'noun', 'b'],
['maledizione', 'noun', 'b'],
['maledizione', 'exclamation', 'b'],
['maleducato', 'adjective', 'c'],
['maleducato', 'noun', 'c'],
['maleducazione', 'noun', 'c'],
['malgrado', 'noun', 'b'],
['malgrado', 'adverb', 'b'],
['malgrado', 'conjunction', 'b'],
['malgrado', 'preposition', 'b'],
['malinconia', 'noun', 'b'],
['malinteso', 'adjective', 'c'],
['malinteso', 'noun', 'c'],
['malizia', 'noun', 'c'],
['maltempo', 'noun', 'c'],
['maltese', 'adjective', 'c'],
['maltese', 'noun', 'c'],
['maltrattamento', 'noun', 'c'],
['maltrattare', 'verb', 'c'],
['malva', 'noun', 'c'],
['malvagio', 'adjective', 'b'],
['malvagio', 'noun', 'b'],
['mamma', 'noun', 'a'],
['mammella', 'noun', 'c'],
['mammifero', 'noun', 'c'],
['manager', 'noun', 'b'],
['mancanza', 'noun', 'a'],
['mancare', 'verb', 'a'],
['mancato', 'past_part', 'b'],
['mancato', 'adjective', 'b'],
['mancino', 'adjective', 'c'],
['mancino', 'noun', 'c'],
['manco', 'adjective', 'b'],
['manco', 'adverb', 'b'],
['mandare', 'verb', 'a'],
['mandarino', 'noun', 'c'],
['mandarino', 'adjective', 'c'],
['mandato', 'past_part', 'b'],
['mandato', 'adjective', 'b'],
['mandato', 'noun', 'b'],
['mandorla', 'noun', 'c'],
['mandorlo', 'noun', 'c'],
['manganello', 'noun', 'c'],
['mangiare', 'verb', 'a'],
['mangime', 'noun', 'c'],
['mania', 'noun', 'b'],
['maniaco', 'adjective', 'c'],
['maniaco', 'noun', 'c'],
['manica', 'noun', 'b'],
['manico', 'noun', 'b'],
['maniera', 'noun', 'a'],
['manifestare', 'verb', 'a'],
['manifestazione', 'noun', 'a'],
['manifesto', 'noun', 'b'],
['mano', 'noun', 'a'],
['manodopera', 'noun', 'c'],
['manoscritto', 'adjective', 'b'],
['manoscritto', 'noun', 'b'],
['manovale', 'noun', 'c'],
['manovra', 'noun', 'b'],
['mantello', 'noun', 'b'],
['mantenere', 'verb', 'a'],
['manuale', 'adjective', 'b'],
['manuale', 'noun', 'b'],
['manuale', 'noun', 'b'],
['manutenzione', 'noun', 'b'],
['manzo', 'noun', 'c'],
['mappa', 'noun', 'b'],
['marca', 'noun', 'b'],
['marcare', 'verb', 'b'],
['marchigiano', 'adjective', 'c'],
['marchigiano', 'noun', 'c'],
['marchio', 'noun', 'b'],
['marcia', 'noun', 'b'],
['marciapiede', 'noun', 'b'],
['marcio', 'adjective', 'b'],
['marcio', 'noun', 'b'],
['marcire', 'verb', 'c'],
['marco', 'noun', 'a'],
['mare', 'noun', 'a'],
['marea', 'noun', 'b'],
['maresciallo', 'noun', 'b'],
['margherita', 'noun', 'c'],
['marginale', 'adjective', 'b'],
['marginale', 'noun', 'b'],
['margine', 'noun', 'b'],
['marinaio', 'noun', 'b'],
['marino', 'adjective', 'b'],
['marino', 'noun', 'b'],
['marionetta', 'noun', 'c'],
['marito', 'noun', 'a'],
['marketing', 'noun', 'b'],
['marmellata', 'noun', 'c'],
['marmo', 'noun', 'b'],
['marocchino', 'adjective', 'c'],
['marocchino', 'noun', 'c'],
['marrone', 'noun', 'b'],
['marrone', 'adjective', 'b'],
['martedì', 'noun', 'b'],
['marzo', 'noun', 'a'],
['mascarpone', 'noun', 'c'],
['maschera', 'noun', 'b'],
['mascherare', 'verb', 'b'],
['mascherato', 'past_part', 'c'],
['mascherato', 'adjective', 'c'],
['maschile', 'adjective', 'a'],
['maschile', 'noun', 'a'],
['maschio', 'noun', 'a'],
['maschio', 'adjective', 'a'],
['massa', 'noun', 'a'],
['massa', 'adverb', 'a'],
['massacrare', 'verb', 'b'],
['massacro', 'noun', 'c'],
['massaggio', 'noun', 'c'],
['massaia', 'noun', 'c'],
['massiccio', 'adjective', 'b'],
['massiccio', 'noun', 'b'],
['massimo', 'adjective', 'a'],
['massimo', 'noun', 'a'],
['massimo', 'adverb', 'a'],
['master', 'noun', 'b'],
['masticare', 'verb', 'b'],
['masturbare', 'verb', 'b'],
['matematica', 'noun', 'b'],
['matematico', 'adjective', 'b'],
['matematico', 'noun', 'b'],
['materasso', 'noun', 'b'],
['materia', 'noun', 'a'],
['materiale', 'adjective', 'a'],
['materiale', 'noun', 'a'],
['maternità', 'noun', 'b'],
['materno', 'adjective', 'b'],
['matita', 'noun', 'b'],
['matricola', 'noun', 'b'],
['matrimoniale', 'adjective', 'b'],
['matrimoniale', 'noun', 'b'],
['matrimonio', 'noun', 'a'],
['mattina', 'noun', 'a'],
['mattinata', 'noun', 'b'],
['mattino', 'noun', 'a'],
['matto', 'adjective', 'a'],
['matto', 'noun', 'a'],
['mattone', 'noun', 'b'],
['mattone', 'adjective', 'b'],
['mattone', 'noun', 'b'],
['maturare', 'verb', 'b'],
['maturità', 'noun', 'b'],
['maturo', 'adjective', 'b'],
['mazzo', 'noun', 'b'],
['me', 'pronoun', 'a'],
['meccanico', 'adjective', 'a'],
['meccanico', 'noun', 'a'],
['meccanismo', 'noun', 'a'],
['medaglia', 'noun', 'b'],
['medesimo', 'adjective', 'b'],
['medesimo', 'pronoun', 'b'],
['media', 'noun', 'a'],
['media', 'noun', 'b'],
['mediante', 'preposition', 'b'],
['medicare', 'verb', 'c'],
['medicina', 'noun', 'a'],
['medico', 'noun', 'a'],
['medico', 'adjective', 'b'],
['medievale', 'adjective', 'b'],
['medio', 'adjective', 'a'],
['medio', 'noun', 'a'],
['medioevo', 'noun', 'b'],
['meditare', 'verb', 'b'],
['mediterraneo', 'adjective', 'b'],
['mediterraneo', 'noun', 'b'],
['meglio', 'adverb', 'a'],
['meglio', 'adjective', 'a'],
['meglio', 'noun', 'a'],
['mela', 'noun', 'b'],
['melagrana', 'noun', 'c'],
['melanzana', 'noun', 'c'],
['melo', 'noun', 'c'],
['melograno', 'noun', 'c'],
['melone', 'noun', 'c'],
['membrana', 'noun', 'b'],
['membro', 'noun', 'a'],
['memoria', 'noun', 'a'],
['menare', 'verb', 'b'],
['mendicante', 'pres_part', 'c'],
['mendicante', 'adjective', 'c'],
['mendicante', 'noun', 'c'],
['meno', 'adverb', 'a'],
['meno', 'adjective', 'a'],
['meno', 'preposition', 'a'],
['meno', 'noun', 'a'],
['mensa', 'noun', 'b'],
['mensile', 'adjective', 'b'],
['mensile', 'noun', 'b'],
['mensola', 'noun', 'c'],
['menta', 'noun', 'c'],
['mentale', 'adjective', 'a'],
['mentalità', 'noun', 'b'],
['mente', 'noun', 'a'],
['mentire', 'verb', 'a'],
['mento', 'noun', 'b'],
['mentre', 'conjunction', 'a'],
['menu', 'noun', 'b'],
['menzogna', 'noun', 'b'],
['meraviglia', 'noun', 'b'],
['meravigliare', 'verb', 'b'],
['meraviglioso', 'adjective', 'a'],
['meraviglioso', 'noun', 'a'],
['mercante', 'noun', 'b'],
['mercato', 'noun', 'a'],
['merce', 'noun', 'b'],
['merceria', 'noun', 'c'],
['mercoledì', 'noun', 'b'],
['merda', 'noun', 'a'],
['merenda', 'noun', 'c'],
['merendina', 'noun', 'c'],
['meridiano', 'adjective', 'c'],
['meridiano', 'noun', 'c'],
['meridionale', 'adjective', 'a'],
['meridionale', 'noun', 'a'],
['meridione', 'noun', 'c'],
['meritare', 'verb', 'a'],
['merito', 'noun', 'a'],
['merlo', 'noun', 'c'],
['merluzzo', 'noun', 'c'],
['mero', 'adjective', 'b'],
['mescolare', 'verb', 'b'],
['mese', 'noun', 'a'],
['messa', 'noun', 'b'],
['messa', 'noun', 'b'],
['messaggio', 'noun', 'a'],
['messe', 'noun', 'c'],
['messicano', 'adjective', 'c'],
['messicano', 'noun', 'c'],
['mestiere', 'noun', 'a'],
['mestolo', 'noun', 'c'],
['mestruazione', 'noun', 'c'],
['metà', 'noun', 'a'],
['meta', 'noun', 'b'],
['metafora', 'noun', 'b'],
['metallico', 'adjective', 'b'],
['metallo', 'noun', 'b'],
['metalmeccanico', 'adjective', 'c'],
['metalmeccanico', 'noun', 'c'],
['meteo', 'adjective', 'b'],
['meteo', 'noun', 'b'],
['metodo', 'noun', 'a'],
['metro', 'noun', 'a'],
['metropolitano', 'adjective', 'b'],
['metropolitano', 'noun', 'b'],
['mettere', 'verb', 'a'],
['mezzanotte', 'noun', 'b'],
['mezzo', 'adjective', 'a'],
['mezzo', 'noun', 'a'],
['mezzo', 'adverb', 'a'],
['mezzogiorno', 'noun', 'b'],
['mi', 'pronoun', 'a'],
['miagolare', 'verb', 'c'],
['mica', 'noun', 'a'],
['mica', 'adverb', 'a'],
['micio', 'noun', 'c'],
['microfono', 'noun', 'b'],
['miele', 'noun', 'b'],
['miele', 'adjective', 'b'],
['mietere', 'verb', 'c'],
['migliaio', 'noun', 'c'],
['migliaio', 'noun', 'a'],
['miglioramento', 'noun', 'b'],
['migliorare', 'verb', 'a'],
['migliore', 'adjective', 'a'],
['migliore', 'noun', 'a'],
['migliore', 'adverb', 'a'],
['mignolo', 'noun', 'c'],
['mila', 'adjective', 'a'],
['milanese', 'adjective', 'b'],
['milanese', 'noun', 'b'],
['miliardo', 'noun', 'a'],
['milione', 'noun', 'a'],
['militare', 'adjective', 'a'],
['militare', 'noun', 'a'],
['mille', 'adjective', 'a'],
['mille', 'noun', 'a'],
['millennio', 'noun', 'b'],
['millimetro', 'noun', 'b'],
['mimosa', 'noun', 'c'],
['minaccia', 'noun', 'b'],
['minacciare', 'verb', 'a'],
['minchia', 'noun', 'b'],
['minestra', 'noun', 'c'],
['minestrone', 'noun', 'c'],
['mini', 'adjective', 'c'],
['miniera', 'noun', 'b'],
['minigonna', 'noun', 'c'],
['minimo', 'adjective', 'a'],
['minimo', 'noun', 'a'],
['ministero', 'noun', 'a'],
['ministro', 'noun', 'a'],
['minoranza', 'noun', 'b'],
['minore', 'adjective', 'a'],
['minore', 'noun', 'a'],
['minuscolo', 'adjective', 'b'],
['minuto', 'noun', 'a'],
['mio', 'adjective', 'a'],
['mio', 'pronoun', 'a'],
['miracolo', 'noun', 'a'],
['mirare', 'verb', 'b'],
['mischiare', 'verb', 'b'],
['miscuglio', 'noun', 'c'],
['miseria', 'noun', 'b'],
['misero', 'adjective', 'b'],
['missile', 'adjective', 'c'],
['missile', 'noun', 'c'],
['missione', 'noun', 'a'],
['mister', 'noun', 'c'],
['misterioso', 'adjective', 'b'],
['mistero', 'noun', 'a'],
['misto', 'adjective', 'b'],
['misto', 'noun', 'b'],
['misura', 'noun', 'a'],
['misurare', 'verb', 'b'],
['misurazione', 'noun', 'c'],
['mitico', 'adjective', 'b'],
['mito', 'noun', 'b'],
['mitragliatrice', 'noun', 'c'],
['mobile', 'adjective', 'a'],
['mobile', 'noun', 'a'],
['mobilio', 'noun', 'c'],
['mocassino', 'noun', 'c'],
['moda', 'noun', 'a'],
['modalità', 'noun', 'b'],
['modella', 'noun', 'b'],
['modellare', 'verb', 'c'],
['modello', 'noun', 'a'],
['moderato', 'past_part', 'b'],
['moderato', 'adjective', 'b'],
['moderato', 'adverb', 'b'],
['moderato', 'noun', 'b'],
['moderatore', 'adjective', 'b'],
['moderatore', 'noun', 'b'],
['modernità', 'noun', 'b'],
['moderno', 'adjective', 'a'],
['moderno', 'noun', 'a'],
['modestia', 'noun', 'c'],
['modesto', 'adjective', 'b'],
['modifica', 'noun', 'b'],
['modificare', 'verb', 'a'],
['modificazione', 'noun', 'b'],
['modo', 'noun', 'a'],
['modulo', 'noun', 'b'],
['moglie', 'noun', 'a'],
['molecola', 'noun', 'b'],
['molisano', 'adjective', 'c'],
['molisano', 'noun', 'c'],
['molla', 'noun', 'c'],
['mollare', 'verb', 'b'],
['mollusco', 'noun', 'c'],
['molo', 'noun', 'c'],
['moltiplicare', 'verb', 'b'],
['molto', 'adjective', 'a'],
['molto', 'pronoun', 'a'],
['molto', 'adverb', 'a'],
['molto', 'noun', 'a'],
['momento', 'noun', 'a'],
['monaca', 'noun', 'c'],
['monaco', 'noun', 'c'],
['monarchica', 'noun', 'c'],
['mondiale', 'adjective', 'a'],
['mondiale', 'noun', 'a'],
['mondo', 'noun', 'a'],
['monello', 'noun', 'c'],
['moneta', 'noun', 'a'],
['monetario', 'adjective', 'b'],
['monitor', 'noun', 'b'],
['monologo', 'noun', 'b'],
['montaggio', 'noun', 'b'],
['montagna', 'noun', 'a'],
['montare', 'verb', 'b'],
['monte', 'noun', 'a'],
['montenegrino', 'adjective', 'c'],
['montenegrino', 'noun', 'c'],
['monumento', 'noun', 'b'],
['mora', 'noun', 'b'],
['morale', 'adjective', 'a'],
['morale', 'noun', 'a'],
['morbido', 'adjective', 'b'],
['morbido', 'noun', 'b'],
['mordere', 'verb', 'b'],
['morire', 'verb', 'a'],
['moro', 'adjective', 'b'],
['moro', 'noun', 'b'],
['morsicare', 'verb', 'c'],
['morso', 'noun', 'c'],
['mortadella', 'noun', 'c'],
['mortale', 'adjective', 'b'],
['mortale', 'noun', 'b'],
['morte', 'noun', 'a'],
['morto', 'past_part', 'a'],
['morto', 'adjective', 'a'],
['morto', 'noun', 'a'],
['mosca', 'noun', 'b'],
['moscovita', 'adjective', 'c'],
['moscovita', 'noun', 'c'],
['mossa', 'noun', 'b'],
['mostarda', 'noun', 'c'],
['mostra', 'noun', 'a'],
['mostrare', 'verb', 'a'],
['mostro', 'noun', 'b'],
['motel', 'noun', 'c'],
['motivare', 'verb', 'b'],
['motivazione', 'noun', 'b'],
['motivo', 'noun', 'a'],
['moto', 'noun', 'a'],
['moto', 'noun', 'b'],
['motociclismo', 'noun', 'c'],
['motociclista', 'adjective', 'c'],
['motociclista', 'noun', 'c'],
['motore', 'adjective', 'a'],
['motore', 'noun', 'a'],
['motorino', 'noun', 'b'],
['motoscafo', 'noun', 'c'],
['mousse', 'noun', 'c'],
['movimento', 'noun', 'a'],
['mozzarella', 'noun', 'c'],
['mucca', 'noun', 'b'],
['mucchio', 'noun', 'b'],
['muggire', 'verb', 'c'],
['muggito', 'past_part', 'c'],
['muggito', 'noun', 'c'],
['mugnaio', 'noun', 'c'],
['mugolare', 'verb', 'c'],
['mulino', 'noun', 'c'],
['multa', 'noun', 'b'],
['multare', 'verb', 'c'],
['multinazionale', 'adjective', 'b'],
['multinazionale', 'noun', 'b'],
['multiplo', 'adjective', 'b'],
['multiplo', 'noun', 'b'],
['multipresa', 'noun', 'c'],
['mummia', 'noun', 'c'],
['mungere', 'verb', 'c'],
['municipio', 'noun', 'c'],
['muovere', 'verb', 'a'],
['murare', 'verb', 'c'],
['muratore', 'noun', 'c'],
['muro', 'noun', 'a'],
['muschio', 'noun', 'c'],
['muschio', 'adjective', 'c'],
['muscolare', 'adjective', 'b'],
['muscolare', 'noun', 'b'],
['muscolo', 'noun', 'a'],
['museo', 'noun', 'a'],
['musica', 'noun', 'a'],
['musicale', 'adjective', 'a'],
['musicista', 'noun', 'b'],
['muso', 'noun', 'b'],
['musulmano', 'adjective', 'b'],
['musulmano', 'noun', 'b'],
['muta', 'noun', 'c'],
['mutamento', 'noun', 'b'],
['mutanda', 'noun', 'b'],
['mutandina', 'noun', 'c'],
['mutare', 'verb', 'b'],
['mutazione', 'noun', 'b'],
['mutilato', 'past_part', 'c'],
['mutilato', 'adjective', 'c'],
['mutilato', 'noun', 'c'],
['muto', 'adjective', 'b'],
['muto', 'noun', 'b'],
['mutuo', 'noun', 'b'],
['nanna', 'noun', 'c'],
['nano', 'adjective', 'b'],
['nano', 'noun', 'b'],
['napoletano', 'adjective', 'b'],
['napoletano', 'noun', 'b'],
['narrare', 'verb', 'b'],
['narrativo', 'adjective', 'b'],
['narratore', 'noun', 'b'],
['narrazione', 'noun', 'b'],
['nasale', 'adjective', 'b'],
['nasale', 'noun', 'b'],
['nascere', 'verb', 'a'],
['nascere', 'noun', 'a'],
['nascita', 'noun', 'a'],
['nascondere', 'verb', 'a'],
['nascondiglio', 'noun', 'c'],
['nascondino', 'noun', 'c'],
['nascosto', 'past_part', 'a'],
['nascosto', 'adjective', 'a'],
['nascosto', 'noun', 'a'],
['naso', 'noun', 'a'],
['nastro', 'noun', 'a'],
['natale', 'adjective', 'a'],
['natale', 'noun', 'a'],
['natalizio', 'adjective', 'b'],
['natalizio', 'noun', 'b'],
['nato', 'past_part', 'b'],
['nato', 'adjective', 'b'],
['nato', 'noun', 'b'],
['natura', 'noun', 'a'],
['naturale', 'adjective', 'a'],
['naturale', 'noun', 'a'],
['naturalmente', 'adverb', 'a'],
['naufragio', 'noun', 'c'],
['navale', 'adjective', 'c'],
['nave', 'noun', 'a'],
['navicella', 'noun', 'c'],
['navigare', 'verb', 'b'],
['navigazione', 'noun', 'b'],
['nazionale', 'adjective', 'a'],
['nazionale', 'noun', 'a'],
['nazionalità', 'noun', 'c'],
['nazione', 'noun', 'a'],
['nazista', 'adjective', 'b'],
['nazista', 'noun', 'b'],
['ndrangheta', 'noun', 'c'],
['né', 'conjunction', 'a'],
['ne', 'pronoun', 'a'],
['ne', 'adverb', 'a'],
['neanche', 'adverb', 'a'],
['nebbia', 'noun', 'b'],
['necessariamente', 'adverb', 'b'],
['necessario', 'adjective', 'a'],
['necessario', 'noun', 'a'],
['necessità', 'noun', 'a'],
['necessitare', 'verb', 'b'],
['negare', 'verb', 'a'],
['negativo', 'adjective', 'a'],
['negativo', 'noun', 'a'],
['negativo', 'adverb', 'a'],
['negazione', 'noun', 'c'],
['negoziante', 'pres_part', 'c'],
['negoziante', 'noun', 'c'],
['negozio', 'noun', 'a'],
['negro', 'adjective', 'b'],
['negro', 'noun', 'b'],
['nemico', 'adjective', 'a'],
['nemico', 'noun', 'a'],
['nemmeno', 'adverb', 'a'],
['neo', 'noun', 'c'],
['neonato', 'noun', 'b'],
['neonato', 'adjective', 'b'],
['neppure', 'adverb', 'a'],
['nero', 'adjective', 'a'],
['nero', 'noun', 'a'],
['nervo', 'noun', 'b'],
['nervosismo', 'noun', 'c'],
['nervoso', 'adjective', 'a'],
['nervoso', 'noun', 'a'],
['nessuno', 'adjective', 'a'],
['nessuno', 'pronoun', 'a'],
['nettare', 'noun', 'c'],
['netto', 'adjective', 'b'],
['netto', 'noun', 'b'],
['netto', 'adverb', 'b'],
['network', 'noun', 'b'],
['neutro', 'adjective', 'b'],
['neutro', 'noun', 'b'],
['neve', 'noun', 'a'],
['nevicare', 'verb', 'c'],
['news', 'noun', 'b'],
['newyorkese', 'adjective', 'c'],
['newyorkese', 'noun', 'c'],
['nido', 'noun', 'b'],
['niente', 'pronoun', 'a'],
['niente', 'adjective', 'a'],
['niente', 'adverb', 'a'],
['nipote', 'noun', 'a'],
['no', 'adverb', | |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.parameter`."""
##############################################################################
# IMPORTS
# STDLIB
import ast
import inspect
import sys
# THIRD PARTY
import pytest
import numpy as np
# LOCAL
import astropy.units as u
from astropy.cosmology import Cosmology
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.parameter import Parameter, _validate_to_float, _validate_with_unit
##############################################################################
# TESTS
##############################################################################
class ParameterTestMixin:
    """Tests for a :class:`astropy.cosmology.Parameter` on a Cosmology.

    :class:`astropy.cosmology.Parameter` is a descriptor and this test suite
    tests descriptors by class inheritance, so ``ParameterTestMixin`` is mixed
    into ``TestCosmology`` (tests :class:`astropy.cosmology.Cosmology`).
    """

    @pytest.fixture
    def parameter(self, cosmo_cls):
        """Cosmological Parameters"""
        # I wish this would work
        # yield from {getattr(cosmo_cls, n) for n in cosmo_cls.__parameters__}

        # just return one parameter at random
        yield getattr(cosmo_cls, set(cosmo_cls.__parameters__).pop())

    @pytest.fixture
    def all_parameter(self, cosmo_cls):
        """Cosmological All Parameter instances"""
        # I wish this would work
        # yield from {getattr(cosmo_cls, n) for n in cosmo_cls.__all_parameters__}

        # just return one parameter at random
        yield getattr(cosmo_cls, set(cosmo_cls.__all_parameters__).pop())

    # ===============================================================
    # Method Tests

    def test_Parameter_class_attributes(self, all_parameter):
        """Test :class:`astropy.cosmology.Parameter` attributes on class."""
        # _registry_validators
        assert hasattr(all_parameter, "_registry_validators")
        assert isinstance(all_parameter._registry_validators, dict)
        assert all(isinstance(k, str) for k in all_parameter._registry_validators.keys())
        assert all(callable(v) for v in all_parameter._registry_validators.values())

    def test_Parameter_init(self):
        """Test :class:`astropy.cosmology.Parameter` instantiation."""
        # defaults
        parameter = Parameter()
        assert parameter.fvalidate is _validate_with_unit
        assert parameter.unit is None
        assert parameter.equivalencies == []
        assert parameter.format_spec == ""
        assert parameter.derived is False
        assert parameter.name is None

        # setting all kwargs
        parameter = Parameter(fvalidate="float", doc="DOCSTRING",
                              unit="km", equivalencies=[u.mass_energy()],
                              fmt=".4f", derived=True)
        assert parameter.fvalidate is _validate_to_float
        assert parameter.unit is u.km
        assert parameter.equivalencies == [u.mass_energy()]
        assert parameter.format_spec == ".4f"
        assert parameter.derived is True

    def test_Parameter_instance_attributes(self, all_parameter):
        """Test :class:`astropy.cosmology.Parameter` attributes from init."""
        assert hasattr(all_parameter, "fvalidate")
        assert callable(all_parameter.fvalidate)

        assert hasattr(all_parameter, "__doc__")

        # Parameter
        assert hasattr(all_parameter, "_unit")
        assert hasattr(all_parameter, "_equivalencies")
        assert hasattr(all_parameter, "_fmt")
        assert hasattr(all_parameter, "_derived")

        # __set_name__
        assert hasattr(all_parameter, "_attr_name")
        assert hasattr(all_parameter, "_attr_name_private")

    def test_Parameter_fvalidate(self, all_parameter):
        """Test :attr:`astropy.cosmology.Parameter.fvalidate`."""
        assert hasattr(all_parameter, "fvalidate")
        assert callable(all_parameter.fvalidate)

    def test_Parameter_name(self, all_parameter):
        """Test :attr:`astropy.cosmology.Parameter.name`."""
        assert hasattr(all_parameter, "name")
        assert isinstance(all_parameter.name, str)
        assert all_parameter.name is all_parameter._attr_name

    def test_Parameter_unit(self, all_parameter):
        """Test :attr:`astropy.cosmology.Parameter.unit`."""
        assert hasattr(all_parameter, "unit")
        assert isinstance(all_parameter.unit, (u.UnitBase, type(None)))
        assert all_parameter.unit is all_parameter._unit

    def test_Parameter_equivalencies(self, all_parameter):
        """Test :attr:`astropy.cosmology.Parameter.equivalencies`."""
        assert hasattr(all_parameter, "equivalencies")
        assert isinstance(all_parameter.equivalencies, (list, u.Equivalency))
        assert all_parameter.equivalencies is all_parameter._equivalencies

    def test_Parameter_format_spec(self, all_parameter):
        """Test :attr:`astropy.cosmology.Parameter.format_spec`."""
        assert hasattr(all_parameter, "format_spec")
        assert isinstance(all_parameter.format_spec, str)
        assert all_parameter.format_spec is all_parameter._fmt

    def test_Parameter_derived(self, cosmo_cls, all_parameter):
        """Test :attr:`astropy.cosmology.Parameter.derived`."""
        assert hasattr(all_parameter, "derived")
        assert isinstance(all_parameter.derived, bool)
        assert all_parameter.derived is all_parameter._derived

        # test value
        if all_parameter.name in cosmo_cls.__parameters__:
            assert all_parameter.derived is False
        else:
            assert all_parameter.derived is True

    # -------------------------------------------
    # descriptor methods

    def test_Parameter_descriptor_get(self, cosmo_cls, cosmo, all_parameter):
        """Test :attr:`astropy.cosmology.Parameter.__get__`."""
        # from class
        parameter = getattr(cosmo_cls, all_parameter.name)
        assert isinstance(parameter, Parameter)
        assert parameter is all_parameter

        # from instance
        parameter = getattr(cosmo, all_parameter.name)
        assert np.all(parameter == getattr(cosmo, all_parameter._attr_name_private))

    def test_Parameter_descriptor_set(self, cosmo, all_parameter):
        """Test :attr:`astropy.cosmology.Parameter.__set__`."""
        # test it's already set
        assert hasattr(cosmo, all_parameter._attr_name_private)

        # and raises an error if set again
        # NOTE(review): this AttributeError message changed in Python 3.11
        # ("property ... has no setter") -- confirm against supported versions.
        with pytest.raises(AttributeError, match="can't set attribute"):
            setattr(cosmo, all_parameter._attr_name, None)

    # -------------------------------------------
    # validate value
    # tested later.

    # ===============================================================
    # Usage Tests

    def test_Parameter_listed(self, cosmo_cls, all_parameter):
        """Test each `astropy.cosmology.Parameter` attached to Cosmology."""
        # just double check that each entry is a Parameter
        assert isinstance(all_parameter, Parameter)

        # the reverse: check that if it is a Parameter, it's listed.
        # note have to check the more inclusive ``__all_parameters__``
        assert all_parameter.name in cosmo_cls.__all_parameters__
        if not all_parameter.derived:
            assert all_parameter.name in cosmo_cls.__parameters__

    def test_parameter_related_attributes_on_Cosmology(self, cosmo_cls):
        """Test `astropy.cosmology.Parameter`-related on Cosmology."""
        # establish has expected attribute
        assert hasattr(cosmo_cls, "__parameters__")
        assert hasattr(cosmo_cls, "__all_parameters__")

    def test_Parameter_not_unique(self, cosmo_cls, clean_registry):
        """Cosmology Parameter not unique to class when subclass defined."""
        # define subclass to show param is same
        class ExampleBase(cosmo_cls):
            param = Parameter()

        class Example(ExampleBase): pass

        assert Example.param is ExampleBase.param
        assert Example.__parameters__ == ExampleBase.__parameters__

    def test_Parameters_reorder_by_signature(self, cosmo_cls, clean_registry):
        """Test parameters are reordered."""
        class Example(cosmo_cls):
            param = Parameter()

            def __init__(self, param, *, name=None, meta=None):
                pass  # never actually initialized

        # param should be 1st, all other parameters next
        # BUGFIX: this comparison was missing its ``assert``, so the check
        # never actually ran.
        assert Example.__parameters__[0] == "param"
        # Check the other parameters are as expected.
        # only run this test if "param" is not already on the cosmology
        if cosmo_cls.__parameters__[0] != "param":
            assert set(Example.__parameters__[1:]) == set(cosmo_cls.__parameters__)

    def test_make_from_Parameter(self, cosmo_cls, clean_registry):
        """Test the parameter creation process. Uses ``__set__``."""
        class Example(cosmo_cls):
            param = Parameter(unit=u.eV, equivalencies=u.mass_energy())

            def __init__(self, param, *, name=None, meta=None):
                self.param = param

            @property
            def is_flat(self):
                return super().is_flat()

        assert Example(1).param == 1 * u.eV
        assert Example(1 * u.eV).param == 1 * u.eV
        assert Example(1 * u.J).param == (1 * u.J).to(u.eV)
        assert Example(1 * u.kg).param == (1 * u.kg).to(u.eV, u.mass_energy())
# ========================================================================
class TestParameter(ParameterTestMixin):
"""
Test `astropy.cosmology.Parameter` directly. Adds a lot of specific tests
that wouldn't be covered by the per-cosmology tests.
"""
def setup_class(self):
    """Create the two example Cosmology classes used by every test.

    ``Example1`` declares a plain Parameter with unit/equivalencies;
    ``Example2`` additionally attaches a custom validator converting
    values to km. Defining the classes registers them (see
    ``teardown_class``, which pops them from ``_COSMOLOGY_CLASSES``).
    """
    class Example1(Cosmology):
        param = Parameter(doc="Description of example parameter.",
                          unit=u.m, equivalencies=u.mass_energy())

        def __init__(self, param=15):
            self.param = param

        @property
        def is_flat(self):
            return super().is_flat()

    # with validator
    class Example2(Example1):
        def __init__(self, param=15 * u.m):
            self.param = param

        @Example1.param.validator
        def param(self, param, value):
            # custom validation: normalize everything to kilometres
            return value.to(u.km)

    # attributes
    self.classes = {"Example1": Example1, "Example2": Example2}
def teardown_class(self):
    """Unregister the example classes created in ``setup_class``."""
    for example_cls in self.classes.values():
        _COSMOLOGY_CLASSES.pop(example_cls.__qualname__)
@pytest.fixture(scope="class", params=["Example1", "Example2"])
def cosmo_cls(self, request):
    """Cosmology class."""
    # Parametrized over both example classes so each test runs once
    # without and once with a custom validator.
    return self.classes[request.param]
@pytest.fixture(scope="class")
def cosmo(self, cosmo_cls):
    """Cosmology instance"""
    # default-constructed instance (param defaults set in setup_class)
    return cosmo_cls()
@pytest.fixture(scope="class")
def param(self, cosmo_cls):
    """Get Parameter 'param' from cosmology class."""
    # class-level access returns the Parameter descriptor itself,
    # not a value
    return cosmo_cls.param
# ==============================================================
def test_Parameter_instance_attributes(self, param):
    """Test :class:`astropy.cosmology.Parameter` attributes from init."""
    super().test_Parameter_instance_attributes(param)
    # the docstring comes straight from the ``doc=`` argument
    assert param.__doc__ == "Description of example parameter."
    # values passed at init, plus the names filled in by set_name
    expected_attrs = {
        "_unit": u.m,
        "_fmt": "",
        "_derived": False,
        "_attr_name": "param",
        "_attr_name_private": "_param",
    }
    for attr, value in expected_attrs.items():
        assert getattr(param, attr) == value
    assert param._equivalencies == u.mass_energy()
def test_Parameter_fvalidate(self, cosmo, param):
    """Test :attr:`astropy.cosmology.Parameter.fvalidate`."""
    super().test_Parameter_fvalidate(param)
    # the custom validator converts to km: 1000 m -> 1 km
    converted = param.fvalidate(cosmo, param, 1000 * u.m)
    assert converted == 1 * u.km
def test_Parameter_name(self, param):
    """Test :attr:`astropy.cosmology.Parameter.name`."""
    super().test_Parameter_name(param)
    # the attribute name assigned in setup_class
    assert param.name == "param"
def test_Parameter_unit(self, param):
    """Test :attr:`astropy.cosmology.Parameter.unit`."""
    super().test_Parameter_unit(param)
    # unit declared at Parameter construction in setup_class
    assert param.unit == u.m
def test_Parameter_equivalencies(self, param):
    """Test :attr:`astropy.cosmology.Parameter.equivalencies`."""
    super().test_Parameter_equivalencies(param)
    # equivalencies declared at Parameter construction in setup_class
    assert param.equivalencies == u.mass_energy()
def test_Parameter_format_spec(self, param):
    """Test :attr:`astropy.cosmology.Parameter.format_spec`."""
    super().test_Parameter_format_spec(param)
    # no fmt was given at construction, so the spec is empty
    assert param.format_spec == ""
def test_Parameter_derived(self, cosmo_cls, param):
    """Test :attr:`astropy.cosmology.Parameter.derived`."""
    super().test_Parameter_derived(cosmo_cls, param)
    # the example parameter is a regular (non-derived) parameter
    assert param.derived is False
# -------------------------------------------
# descriptor methods
def test_Parameter_descriptor_get(self, cosmo_cls, cosmo, param):
    """Test :meth:`astropy.cosmology.Parameter.__get__`."""
    super().test_Parameter_descriptor_get(cosmo_cls, cosmo, param)
    # instance access returns the stored value (the default, 15 m)
    assert getattr(cosmo, param.name) == 15 * u.m
# -------------------------------------------
# validation
def test_Parameter_validator(self, param):
    """Test :meth:`astropy.cosmology.Parameter.validator`."""
    # every registered validator can be swapped in by its key
    for key in Parameter._registry_validators:
        swapped = param.validator(key)
        assert swapped.fvalidate == swapped._registry_validators[key]
    # error for non-registered str
    with pytest.raises(ValueError, match="`fvalidate`, if str"):
        Parameter(fvalidate="NOT REGISTERED")
    # error if wrong type
    with pytest.raises(TypeError, match="`fvalidate` must be a function or"):
        Parameter(fvalidate=object())
def test_Parameter_validate(self, cosmo, param):
    """Test :meth:`astropy.cosmology.Parameter.validate`."""
    validated = param.validate(cosmo, 1000 * u.m)
    if param.fvalidate is param._registry_validators["default"]:
        # default validator leaves the unit alone
        expected_unit, expected_value = u.m, 1000
    else:
        # custom validator converts to km
        expected_unit, expected_value = u.km, 1
    assert validated.unit == expected_unit
    assert validated.value == expected_value
def test_Parameter_register_validator(self, param):
    """Test :meth:`astropy.cosmology.Parameter.register_validator`."""
    parameter_cls = param.__class__
    # registering under an already-used key is an error
    with pytest.raises(KeyError, match="validator 'default' already"):
        parameter_cls.register_validator("default", None)
    # direct call: the passed function is returned unchanged
    try:
        func = lambda x: x
        assert parameter_cls.register_validator("newvalidator", func) is func
    finally:
        parameter_cls._registry_validators.pop("newvalidator", None)
    # decorator form registers the function under the given key
    try:
        @parameter_cls.register_validator("newvalidator")
        def func(cosmology, param, value):
            return value

        assert parameter_cls._registry_validators["newvalidator"] is func
    finally:
        parameter_cls._registry_validators.pop("newvalidator", None)
# -------------------------------------------
def test_Parameter_clone(self, param):
    """Test :meth:`astropy.cosmology.Parameter.clone`."""
    # (implicitly relies on ``__eq__``, which is tested separately)
    # no-argument clone: equal to, but distinct from, the original
    duplicate = param.clone()
    assert duplicate == param
    assert duplicate is not param
    # kwargs override fields on the copy only
    changed = param.clone(unit="km/(yr sr)")
    assert changed.unit == u.km / u.yr / u.sr
    assert param.unit != u.km / u.yr / u.sr  # original is unchanged
    # unknown kwargs are rejected
    with pytest.raises(TypeError):
        param.clone(not_a_valid_parameter=True)
# -------------------------------------------
def test_Parameter_equality(self):
    """
    Test Parameter equality.
    Determined from the processed initialization args (including defaults).
    """
    lhs = Parameter(unit="km / (s Mpc)")
    rhs = Parameter(unit="km / (s Mpc)")
    assert lhs == rhs
    # a different unit makes parameters unequal
    assert Parameter(unit="km / s") != lhs
    # comparing against a non-Parameter is unequal, not an error
    assert lhs != 2
# -------------------------------------------
def test_Parameter_repr(self, cosmo_cls, param):
"""Test Parameter repr."""
r = repr(param)
assert "Parameter(" in r
for subs in ("derived=False", 'unit=Unit("m")', 'equivalencies=[(Unit("kg"), Unit("J")',
"fmt=''", "doc='Description of example parameter.'"):
assert subs in r, subs
# `fvalidate` is a little tricker b/c one of them is custom!
if param.fvalidate in param._registry_validators.values(): # not custom
assert "fvalidate='default'" | |
# -*- coding: utf-8 -*-
#
#
from __future__ import print_function
import csv
import os
import re
import sys
import arrow
from gsheets import Sheets
# Directory containing this script (used to locate the client secrets).
CURRENT_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))

# Feature toggles driven by environment variables ("1" enables them).
DEBUG = os.environ.get('DEBUG', "0") == "1"
AS_CSV = os.environ.get('CSV', "0") == "1"

# Column layout of the timesheet spreadsheet (0-based indices).
COL_DATE = 0
COL_WEEKDAY = 1
COL_TIME_START = 2
COL_TIME_END = 3
COL_LUNCH = 4
COL_TIME = 5  # includes lunch
COL_TIME_FIXED = 6  # does not include lunch
COL_MOVE = 7
COL_WORK_FROM_HOME = 8
COL_NOTES = 9
COL_TASKS_START = 10  # task cells start here and run to the end of the row

# Cell values that mark a non-working day instead of a start/end time.
SPECIAL_VALUES = ["sick", "ab", "off", "wfh", "hol"]

# Weekday indices; presumably Monday == 0 (arrow convention) -- TODO confirm
# against the truncated calc_daily_hours_for_month usage.
SATURDAY = 5
SUNDAY = 6
def calc(hour, half_it=False, split_char=":"):
    """Parse an "H:MM"-style duration into an (hours, minutes) tuple.

    Args:
        hour: value to parse, e.g. "7:30", "7", 7, or "1.5" (with
            ``split_char="."``).
        half_it: if True, halve both components (half-day entries).
        split_char: separator between the hour and minute parts.

    Returns:
        (hours, minutes); (0, 0) when the value cannot be parsed.
    """
    parts = str(hour).split(split_char)
    # BUG FIX: the previous bare ``except`` only handled the single-part
    # case; malformed two-part input such as "a:b" fell through and
    # returned None implicitly, crashing callers that unpack the result.
    try:
        local_hours = int(parts[0])
        local_minutes = int(parts[1]) if len(parts) > 1 else 0
    except ValueError:
        # unparseable input (e.g. "off", "") -> zero duration
        return 0, 0
    if half_it:
        # BUG FIX: halving previously only happened on the two-part parse
        # path; whole-hour values like "7" were returned un-halved.
        local_hours = local_hours / 2
        local_minutes = local_minutes / 2
    return local_hours, local_minutes
def get_client_secret_filenames():
    """Locate the gsheets client-secret and cache files.

    Checks next to this script first, then the user's home directory;
    raises when neither location contains the secrets file.
    """
    for directory in (CURRENT_PATH, os.path.expanduser("~")):
        filename = os.path.join(directory, "client-secrets.json")
        cachefile = os.path.join(directory, "client-secrets-cache.json")
        if os.path.exists(filename):
            return filename, cachefile
    raise Exception("Please provide a client-secret.json file, as described here: https://github.com/xflr6/gsheets#quickstart")
def load_first_sheet_rows(api, timesheet_url, date=None):
    """Open the spreadsheet at ``timesheet_url`` and return the first
    sheet's cell rows.

    Args:
        api: authenticated gsheets client.
        timesheet_url: URL of the spreadsheet to open.
        date: date label used for logging only; defaults to today
            (YYYYMMDD), resolved at call time.

    Returns:
        The rows (cell values) of the first sheet.
    """
    # BUG FIX: the default used to be ``arrow.now().format('YYYYMMDD')``
    # in the signature, which is evaluated once at import time - any
    # long-running process would keep logging a stale date.
    if date is None:
        date = arrow.now().format('YYYYMMDD')
    print("Opening timesheet for %s ..." % (date))
    sheets = api.get(timesheet_url)
    sheet = sheets.sheets[0]
    print(u"Timesheet [%s] sheet [%s] opened. Accessing cell data ..." % (sheets.title or "???", sheet.title or "???"))
    rows = sheet.values()
    return rows
def load_sheet_and_read_data(api, timesheet_url, commandline, user_full_name):
    """Load the timesheet and print the daily report for the requested date.

    ``commandline`` may carry a YYYYMMDD date; anything unparseable falls
    back to today.
    """
    today = arrow.now().format('YYYYMMDD')
    try:
        use_date = arrow.get(commandline, 'YYYYMMDD').format('YYYYMMDD')
    except arrow.parser.ParserError:
        use_date = today
    rows = load_first_sheet_rows(api, timesheet_url, use_date)
    timesheet = get_timesheet_for_date(rows, use_date, user_full_name)
    if not timesheet:
        print("No entry found for %s" % use_date)
        return
    print("\n\n")
    print("Timesheet for %s" % (use_date))
    print(timesheet)
    print("\n")
def get_timesheet_for_date(rows, date, user_full_name):
    """Build the daily-report message for ``date`` (YYYYMMDD) from sheet rows.

    Returns the formatted report string, or None when the date row is
    missing, duplicated, or incomplete.
    """
    # find the row with the first column that has today's date in it
    result_rows = [row for row in rows if row and str(row[COL_DATE]) == date]
    if result_rows is None or not result_rows:
        return None
    if len(result_rows) != 1:
        print("More than one entry (%d) found for date %s! Please fix your sheet!" % (len(result_rows), date))
        return None
    found_row = result_rows[0]
    found_index = rows.index(found_row)
    start_val = found_row[COL_TIME_START]
    end_val = found_row[COL_TIME_END]
    duration_val = found_row[COL_TIME_FIXED]
    max_cols = len(found_row)
    # NOTE(review): the nested condition below can never be true -- a falsy
    # start_val is by definition not in SPECIAL_VALUES -- so the reminder
    # never prints and an empty start time falls through to the parser
    # below. The intent was probably
    # ``if not start_val or start_val in SPECIAL_VALUES`` -- confirm.
    if not start_val:
        if start_val in SPECIAL_VALUES:
            print("You forgot to add your start time.")
            return None
    # same caveat as above, for the end time
    if not end_val:
        if end_val in SPECIAL_VALUES:
            print("You forgot to add your end time.")
            return None
    #if max_cols >= COL_NOTES:
    #    print("No notes/tasks entered yet.")
    #    return None

    def parse_hours(val):
        # accept both zero-padded ("08:30") and bare ("8:30") hours
        try:
            return arrow.get(val, "HH:mm")
        except arrow.parser.ParserError:
            return arrow.get(val, "H:mm")

    start = parse_hours(start_val).format("HH:mm")
    end = parse_hours(end_val).format("HH:mm")
    duration = str(duration_val)
    notes_str = found_row[COL_NOTES]
    notes = notes_str.split('\n')
    # check the previous Friday entry (if today is not Friday), to see what
    # work from home days were selected
    weekday = (found_row[COL_WEEKDAY] or "").lower()
    check_start_index = found_index if weekday.startswith("fr") else found_index - 7
    check_row = found_row
    # walk forward from (up to) a week back until a Friday row is found
    while (check_start_index < found_index):
        check_row = rows[check_start_index]
        if (len(check_row) > COL_WEEKDAY and check_row[COL_WEEKDAY] or "").lower().startswith("fr"):
            break
        check_start_index += 1
    is_same_day = None
    if check_start_index != found_index:
        # print("HA! GOT PREVS FRIDAY.")
        is_same_day = False
    else:
        # print("SAME DAY")
        is_same_day = True
    # work-from-home cell, e.g. "Mon, Tue" -> "Monday and Tuesday"
    wfh = u"" if len(check_row)-1 < COL_WORK_FROM_HOME else check_row[COL_WORK_FROM_HOME]
    wfh = wfh.replace("Mon", "Monday")
    wfh = wfh.replace("Tue", "Tuesday")
    wfh = wfh.replace("Wed", "Wednesday")
    wfh = wfh.replace("Thu", "Thursday")
    wfh = wfh.replace("Fri", "Friday")
    wfh = wfh.replace(", ", ",").replace(",", " and ")
    wfh_extra = "Next week" if is_same_day else "This week"
    wfh_info = """%s %s""" % (wfh_extra, wfh) if wfh != "" else "all days"
    # 2021-01-04 just make this the default for now
    # (the computed wfh_info above is intentionally overridden)
    wfh_info = "at all times, unless mentioned otherwise below"
    # regex: ([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\]))
    # text: SCAN-4167 As a developer, I want to update AIScanRobo every week [1h]
    # 3 groups:
    #   SCAN-4167
    #   As a developer, I want to update AIScanRobo every week [
    #   1h
    r = re.compile(r"([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\]))")
    total_time_minutes_from_tasks = 0
    tasks = []
    for idx in range(COL_TASKS_START, max_cols):
        task = found_row[idx].strip()
        if task:
            # only the first line of a cell carries "KEY-123 ... [duration]"
            t = task.split('\n')[0] if '\n' in task else task
            try:
                g = r.match(t).groups()
            except Exception as ex:
                print("ERROR: %s - %s" % (t, str(ex)))
                continue
            if DEBUG:
                print("task: %s" % (t))
                print("groups: %s" % len(g))
            [task_number, task_details, task_duration] = g
            # fractional digit after "." is scaled by 6 min ('.5' -> 30 min)
            hours, half_hours = calc(task_duration.replace("h", ""), split_char=".")
            minutes = (hours * 60) + (6 * half_hours)
            total_time_minutes_from_tasks += minutes
            # remaining lines of the cell are kept as sub-task lines
            other_lines = task.split('\n')[1:]
            # task_details[:-2] strips the trailing " [" before the duration
            tasks.append("%s %s\n%s" % (task_number.strip(), task_details[:-2].strip(), '\n'.join(other_lines)))

    def format_tasks(tasks):
        # render tasks as a bulleted list; sub-task lines are indented
        if not tasks:
            return ''
        result = 'Tasks:\n'
        for task in tasks:
            if '\n' in task:
                sub_tasks = task.split('\n')
                if len(sub_tasks) > 1:
                    result += '\n* ' + sub_tasks[0]  # main task
                    for sub_task in sub_tasks[1:]:  # actual sub tasks
                        result += '\n\t' + sub_task
                    result += '\n'
                else:
                    result += '\n* ' + task
            else:
                result += '\n* ' + task
        return result

    def format_notes(notes):
        # render free-form notes as a bulleted list; empty input -> ''
        if not notes or (len(notes) == 1 and not notes[0]):
            return ''
        result = 'Additional Notes:\n'
        for note in notes:
            result += '\n* ' + note
        return result

    # cross-check the per-task durations against the sheet's total
    total_hours = str(int(total_time_minutes_from_tasks / 60)).zfill(2)
    total_minutes = str(total_time_minutes_from_tasks % 60).zfill(2)
    total_duration = "%s:%s" % (total_hours, total_minutes)
    test_duration = duration
    if len(test_duration) <= 4:
        # zero-pad "H:MM" so it compares equal to "HH:MM"
        test_duration = "0%s" % duration
    if total_duration != test_duration:
        print("")
        print("")
        print("The task times do not add up! Tasks vs time entered: %s != %s" % (total_duration, test_duration))
        print("")
        print("")

    # Time: %(start)s - %(end)s (%(duration)s hours total [%(total_hours)s:%(total_minutes)s])
    msg = """
[Daily Report] %(date)s
WFH: %(wfh_info)s
Hi,
Daily Report for Date: %(date)s
%(tasks)s
%(notes)s
Kind regards,
%(user_full_name)s
""".strip() % {
        "date": date,
        "user_full_name": user_full_name,
        # start/end/duration/total_* are currently unused by the template
        # (the "Time:" line above is commented out); kept for easy re-enable
        "start": start,
        "end": end,
        "duration": duration,
        "wfh_info": wfh_info,
        "tasks": format_tasks(tasks) if tasks else "",
        "notes": format_notes(notes) if notes else "",
        "total_hours": total_hours,
        "total_minutes": total_minutes,
    }
    print("Total time for all tasks (%s): %s - %s:%s" % (len(tasks), total_time_minutes_from_tasks, total_hours, total_minutes))
    return msg
def _load_sheet_data(api, timesheet_url, arg_date=None):
    """Load the first sheet's rows plus the month being processed.

    ``arg_date`` may be anything arrow can parse as YYYYMM; invalid or
    missing values fall back to the current month.

    Returns:
        (rows, date_str) where ``date_str`` is the "YYYYMM" month string.
    """
    # BUG FIX: previously the happy path bound an Arrow object while the
    # fallback bound a plain string; the later ``date.format('YYYYMM')``
    # only worked by accident (str.format ignores a positional argument
    # when the string has no placeholders). Normalize to a string here.
    try:
        date_str = arrow.get(arg_date, 'YYYYMM').format('YYYYMM')
    except Exception:  # pylint: disable=W0703
        date_str = arrow.now().format('YYYYMM')
    rows = load_first_sheet_rows(api, timesheet_url, date_str)
    return (rows, date_str)
def export_csv(api, timesheet_url, arg_date):
    """Export all timesheet entries of month ``arg_date`` (YYYYMM) to CSV.

    Writes ``<arg_date>.csv`` into the current working directory with the
    columns: username|date|task|duration|work_type|details.
    Returns None when no entries exist for the month.
    """
    rows, date = _load_sheet_data(api, timesheet_url, arg_date)
    filtered = [row for row in rows if row and str(row[COL_DATE]).startswith(date)]
    if filtered is None or not filtered:
        return None
    csv_filename = os.path.join(os.getcwd(), "%s.csv" % (arg_date))
    print("")
    print("Found (%d) entries for date %s!" % (len(filtered), date))
    print("Writing to %s" % (csv_filename))
    with open(csv_filename, mode='w') as csv_file:
        # use a distinct name for the writer instead of rebinding the
        # file handle (the original shadowed ``f``)
        writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        writer.writerow(["username", "date", "task", "duration", "work_type", "details"])

        def w(task, duration_minutes, details=""):
            # ``date`` is rebound to the current row's YYYYMMDD value in
            # the loop below before ``w`` is ever called
            work_type = "Meeting" if "meeting" in details.lower() else "Development"
            # Needed CSV columns:
            # username|date|task|duration|work_type|details
            writer.writerow(["daniel", arrow.get(str(date), 'YYYYMMDD').format('YYYY.MM.DD'),
                             task, "%dm" % (duration_minutes), work_type, details])

        # regex: ([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\]))
        # matches e.g. "SCAN-4167 As a developer, ... [1h]" in 3 groups:
        # issue key, description (ending in " ["), duration
        r = re.compile(r"([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\]))")
        for row in filtered:
            max_cols = len(row)
            # BUG FIX: these bounds checks used ``>=``, but ``row[idx]``
            # is only valid when len(row) > idx, so short rows raised
            # IndexError. (The unused COL_TIME_FIXED read was dropped.)
            time_start = row[COL_TIME_START] if max_cols > COL_TIME_START else None
            time_end = row[COL_TIME_END] if max_cols > COL_TIME_END else None
            date = row[COL_DATE] if max_cols > COL_DATE else None
            if time_start is None or time_end is None or date is None:
                continue
            tasks = []
            for idx in range(COL_TASKS_START, max_cols):
                task = row[idx].strip()
                if task:
                    tasks.append(task)
            if len(tasks) == 0:
                print("%s: no tasks found! %s" % (date, time_start))
                continue
            print("%s: %d tasks found!" % (date, len(tasks)))
            for task in tasks:
                # only the first line of a cell carries "KEY-123 ... [1h]"
                t = task.split('\n')[0] if '\n' in task else task
                try:
                    g = r.match(t).groups()
                except Exception as ex:
                    print("ERROR: %s - %s" % (t, str(ex)))
                    continue
                if DEBUG:
                    print("task: %s" % (t))
                    print("groups: %s" % len(g))
                [task_number, task_details, duration] = g
                # fractional digit is scaled by 6 minutes ('.5' -> 30 min)
                hours, half_hours = calc(duration.replace("h", ""), split_char=".")
                minutes = (hours * 60) + (6 * half_hours)
                if DEBUG:
                    print("time: %s, %s $ %s $ %s" % (hours, half_hours, duration, minutes))
                details = "%s %s" % (task_number, task_details[:-1].strip())
                w(task_number, minutes, details.strip())
    print("")
    print("CSV output to: %s" % (csv_filename))
def calc_daily_hours_for_month(api, timesheet_url, arg_date):
rows, date = _load_sheet_data(api, timesheet_url, arg_date)
filtered = [row for row in rows if row and str(row[COL_DATE]).startswith(date)]
if filtered is None or not filtered:
return None
print("")
print("Found (%d) entries for date %s!" % (len(filtered), date))
minutes = 0
days | |
<reponame>tslazarova/WMCore<filename>src/python/WMCore/MicroService/Unified/MSOutput.py
"""
File       : MSOutput.py
Description: MSOutput.py class provides the whole logic behind
             the Output data placement in WMCore MicroServices.
"""
# futures
from __future__ import division, print_function
# system modules
from pymongo import IndexModel, ReturnDocument, errors
from pymongo.command_cursor import CommandCursor
from pprint import pformat
from copy import deepcopy
from time import time
from socket import gethostname
from threading import current_thread
from retry import retry
# WMCore modules
from WMCore.MicroService.DataStructs.DefaultStructs import OUTPUT_PRODUCER_REPORT
from WMCore.MicroService.DataStructs.DefaultStructs import OUTPUT_CONSUMER_REPORT
from WMCore.MicroService.Unified.MSCore import MSCore
from WMCore.Services.DDM.DDM import DDM, DDMReqTemplate
from WMCore.Services.CRIC.CRIC import CRIC
from WMCore.Services.Rucio.Rucio import Rucio
from Utils.EmailAlert import EmailAlert
from Utils.Pipeline import Pipeline, Functor
from WMCore.Database.MongoDB import MongoDB
from WMCore.MicroService.DataStructs.MSOutputTemplate import MSOutputTemplate
from WMCore.MicroService.Unified.MSOutputStreamer import MSOutputStreamer
from WMCore.WMException import WMException
class MSOutputException(WMException):
    """
    General Exception Class for MSOutput Module in WMCore MicroServices
    """
    def __init__(self, message):
        # BUG FIX: the message prefix previously read "MSOtputException"
        # (typo); it now matches the class name.
        self.myMessage = "MSOutputException: %s" % message
        super(MSOutputException, self).__init__(self.myMessage)
class EmptyResultError(MSOutputException):
    """
    A MSOutputException signalling an empty result from database query.
    """
    def __init__(self, message=None):
        if message:
            # BUG FIX: the "%s" placeholder was never interpolated, so the
            # supplied message was silently dropped.
            self.myMessage = "EmptyResultError: %s" % message
        else:
            self.myMessage = "EmptyResultError."
        super(EmptyResultError, self).__init__(self.myMessage)
class UnsupportedError(MSOutputException):
    """
    A MSOutputException signalling an unsupported mode for a function or method.
    """
    def __init__(self, message=None):
        if message:
            # BUG FIX: the "%s" placeholder was never interpolated, so the
            # supplied message was silently dropped.
            self.myMessage = "UnsupportedError: %s" % message
        else:
            self.myMessage = "UnsupportedError."
        super(UnsupportedError, self).__init__(self.myMessage)
class MSOutput(MSCore):
"""
MSOutput.py class provides the whole logic behind the Output data placement
in MicroServices.
"""
def __init__(self, msConfig, mode, logger=None):
    """
    Runs the basic setup and initialization for the MSOutput module
    :param msConfig: microservice configuration dictionary
    :param mode: MSOutput Run mode:
        - MSOutputConsumer:
            Reads The workflow and transfer subscriptions from MongoDB and
            makes transfer subscriptions.
        - MSOutputProducer:
            Fetches Workflows in a given status from Reqmgr2 then creates
            and uploads the documents to MongoDB.
    :param logger: logger instance (optional)
    """
    super(MSOutput, self).__init__(msConfig, logger)

    self.mode = mode
    # fill in defaults for any configuration keys the caller omitted
    self.msConfig.setdefault("limitRequestsPerCycle", 500)
    self.msConfig.setdefault("verbose", True)
    self.msConfig.setdefault("interval", 600)
    self.msConfig.setdefault("services", ['output'])
    self.msConfig.setdefault("defaultDataManSys", "DDM")
    self.msConfig.setdefault("defaultGroup", "DataOps")
    self.msConfig.setdefault("enableAggSubscr", True)
    self.msConfig.setdefault("enableDataPlacement", False)
    self.msConfig.setdefault("excludeDataTier", ['NANOAOD', 'NANOAODSIM'])
    self.msConfig.setdefault("rucioAccount", 'wma_test')
    self.msConfig.setdefault("mongoDBUrl", 'mongodb://localhost')
    self.msConfig.setdefault("mongoDBPort", 8230)
    self.msConfig.setdefault("streamerBufferFile", None)
    self.uConfig = {}
    self.emailAlert = EmailAlert(self.msConfig)
    self.cric = CRIC(logger=self.logger)
    # NOTE(review): duplicate assignment -- self.uConfig was already set
    # to {} a few lines above; one of the two can likely be removed.
    self.uConfig = {}
    # caches populated by updateCaches()
    self.campaigns = {}
    self.psn2pnnMap = {}
    # MongoDB bookkeeping: one collection for RelVal and one for
    # non-RelVal workflows, both uniquely indexed by RequestName
    msOutIndex = IndexModel('RequestName', unique=True)
    msOutDBConfig = {
        'database': 'msOutDB',
        'server': self.msConfig['mongoDBUrl'],
        'port': self.msConfig['mongoDBPort'],
        'logger': self.logger,
        'create': True,
        'collections': [
            ('msOutRelValColl', msOutIndex),
            ('msOutNonRelValColl', msOutIndex)]}
    self.msOutDB = MongoDB(**msOutDBConfig).msOutDB
    self.msOutRelValColl = self.msOutDB['msOutRelValColl']
    self.msOutNonRelValColl = self.msOutDB['msOutNonRelValColl']
    # select the data-management backend
    if self.msConfig['defaultDataManSys'] == 'DDM':
        # NOTE(review): 'ddmUrl' has no setdefault above, so a DDM
        # configuration without it raises KeyError here -- confirm that
        # this is the intended failure mode.
        self.ddm = DDM(url=self.msConfig['ddmUrl'],
                       logger=self.logger,
                       enableDataPlacement=self.msConfig['enableDataPlacement'])
    elif self.msConfig['defaultDataManSys'] == 'Rucio':
        self.rucio = Rucio(self.msConfig['rucioAccount'],
                           configDict={"logger": self.logger})
@retry(tries=3, delay=2, jitter=2)
def updateCaches(self):
    """
    Fetch the data required by the output logic: the unified
    configuration, the campaign configurations and the PSN -> PNN map.
    Raises RuntimeWarning when any of them cannot be fetched (retried
    by the decorator).
    """
    self.logger.info("Updating local cache information.")
    self.uConfig = self.unifiedConfig()
    campaigns = self.reqmgrAux.getCampaignConfig("ALL_DOCS")
    self.psn2pnnMap = self.cric.PSNtoPNNMap()
    # guard clauses instead of an if/elif/else ladder
    if not self.uConfig:
        raise RuntimeWarning("Failed to fetch the unified configuration")
    if not campaigns:
        raise RuntimeWarning("Failed to fetch the campaign configurations")
    if not self.psn2pnnMap:
        raise RuntimeWarning("Failed to fetch PSN x PNN map from CRIC")
    # index campaigns by name for easy and efficient look-up
    self.campaigns = {camp['CampaignName']: camp for camp in campaigns}
def execute(self, reqStatus):
    """
    Executes the whole output data placement logic
    :param reqStatus: request status(es) to be processed (mode dependent)
    :return: summary report dictionary for this cycle
    """
    # start threads in MSManager which should call this method
    # NOTE:
    #    Here we should make the whole logic - like:
    #    * Calling the system to fetch the workflows from;
    #    * Creating the workflow objects;
    #    * Pushing them into the back end database system we choose for bookkeeping
    #    * Updating their status in that system, both MsStatus (subscribed,
    #      processing, etc.) and also the Reqmgr status
    #    * Associate and keep track of the requestID/subscriptionID/ruleID
    #      returned by the Data Management System and the workflow
    #      object (through the bookkeeping machinery we choose/develop)
    self.currHost = gethostname()
    self.currThread = current_thread()
    self.currThreadIdent = "%s:%s@%s" % (self.currThread.name, self.currThread.ident, self.currHost)
    if self.mode == 'MSOutputProducer':
        summary = self._executeProducer(reqStatus)
    elif self.mode == 'MSOutputConsumer':
        summary = self._executeConsumer()
    else:
        msg = "MSOutput is running in unsupported mode: %s\n" % self.mode
        msg += "Skipping the current run!"
        self.logger.warning(msg)
        # BUG FIX: ``summary`` was previously left unbound on this branch,
        # so the ``return summary`` below raised UnboundLocalError.
        summary = {}
    return summary
def _executeProducer(self, reqStatus):
    """
    Update the caches and run the Producer: fetch requests in the given
    statuses from ReqMgr2 and upload the corresponding documents to
    MongoDB via the streamer.

    :param reqStatus: list of ReqMgr2 request statuses to fetch
    :return: summary report dictionary for this cycle
    """
    summary = dict(OUTPUT_PRODUCER_REPORT)
    self.updateReportDict(summary, "thread_id", self.currThreadIdent)
    msg = "{}: MSOutput is running in mode: {}".format(self.currThreadIdent, self.mode)
    self.logger.info(msg)
    try:
        requestRecords = {}
        for status in reqStatus:
            numRequestRecords = len(requestRecords)
            requestRecords.update(self.getRequestRecords(status))
            msg = "{}: Retrieved {} requests in status {} from ReqMgr2. ".format(self.currThreadIdent,
                                                                                 len(requestRecords) - numRequestRecords,
                                                                                 status)
            self.logger.info(msg)
    except Exception as err:  # general error
        # NOTE(review): execution deliberately continues with whatever
        # records were collected before the failure (no early return).
        msg = "{}: Unknown exception while fetching requests from ReqMgr2. ".format(self.currThreadIdent)
        msg += "Error: {}".format(str(err))
        self.logger.exception(msg)
    try:
        self.updateCaches()
    except RuntimeWarning as ex:
        # all @retry attempts inside updateCaches were exhausted
        msg = "{}: All retries exhausted! Last error was: '{}'".format(self.currThreadIdent,
                                                                       str(ex))
        msg += "\nRetrying to update caches again in the next cycle."
        self.logger.error(msg)
        self.updateReportDict(summary, "error", msg)
        return summary
    except Exception as ex:
        msg = "{}: Unknown exception updating caches. ".format(self.currThreadIdent)
        msg += "Error: {}".format(str(ex))
        self.logger.exception(msg)
        self.updateReportDict(summary, "error", msg)
        return summary
    try:
        # stream the collected records through the Producer pipeline
        streamer = MSOutputStreamer(bufferFile=self.msConfig['streamerBufferFile'],
                                    requestRecords=requestRecords,
                                    logger=self.logger)
        total_num_requests = self.msOutputProducer(streamer())
        msg = "{}: Total {} requests processed from the streamer. ".format(self.currThreadIdent,
                                                                           total_num_requests)
        self.logger.info(msg)
        self.updateReportDict(summary, "total_num_requests", total_num_requests)
    except Exception as ex:
        msg = "{}: Unknown exception while running the Producer thread. ".format(self.currThreadIdent)
        msg += "Error: {}".format(str(ex))
        self.logger.exception(msg)
        self.updateReportDict(summary, "error", msg)
    return summary
def _executeConsumer(self):
    """
    Execute the Consumer function itself: process documents from MongoDB
    and report the number of requests handled in this cycle.

    :return: summary report dictionary for this cycle
    """
    summary = dict(OUTPUT_CONSUMER_REPORT)
    self.updateReportDict(summary, "thread_id", self.currThreadIdent)
    self.logger.info(
        "{}: MSOutput is running in mode: {} ".format(self.currThreadIdent, self.mode))
    self.logger.info(
        "{}: Service set to process up to {} requests "
        "per cycle per each type 'RelVal' and 'NonRelval' workflows.".format(
            self.currThreadIdent, self.msConfig["limitRequestsPerCycle"]))
    if not self.msConfig['enableDataPlacement']:
        self.logger.warning(
            "{} enableDataPlacement = False. "
            "Running the MSOutput service in dry run mode".format(self.currThreadIdent))
    try:
        total_num_requests = self.msOutputConsumer()
        self.logger.info(
            "{}: Total {} requests processed. ".format(self.currThreadIdent,
                                                       total_num_requests))
        self.updateReportDict(summary, "total_num_requests", total_num_requests)
    except Exception as ex:
        errMsg = "{}: Unknown exception while running Consumer thread. ".format(self.currThreadIdent)
        errMsg += "Error: {}".format(str(ex))
        self.logger.exception(errMsg)
        self.updateReportDict(summary, "error", errMsg)
    return summary
def makeSubscriptions(self, workflow):
"""
The common function to make the final subscriptions. It depends on the
default Data Management System configured through msConfig. Based on that
The relevant service wrapper is called.
:return: A list of results from the REST interface of the DMS in question
"""
# NOTE:
# Here is just an example construction of the function. None of the
# data structures used to visualise it is correct. To Be Updated
if self.msConfig['defaultDataManSys'] == 'DDM':
# NOTE:
# We always aggregate per workflow here (regardless of enableAggSubscr)
# and then if we work in strides and enableAggSubscr is True then
# we will aggregate all similar subscription for all workflows
# in a single subscription - then comes the mess how to map back
# which workflow's outputs went to which transfer subscription etc.
# (TODO:)
#
# NOTE:
# Once we move to working in strides of multiple workflows at a time
# then the workflow sent to that function should not be a single one
# but an iterator of length 'stride' and then we should be doing:
# for workflow in workflows:
if isinstance(workflow, MSOutputTemplate):
ddmReqList = []
try:
if workflow['isRelVal']:
group = 'RelVal'
else:
group = 'DataOps'
for dMap in workflow['destinationOutputMap']:
try:
ddmRequest = DDMReqTemplate('copy',
item=dMap['datasets'],
n=workflow['numberOfCopies'],
site=dMap['destination'],
group=group)
except KeyError as ex:
# NOTE:
# If we get to here it is most probably because the 'site'
# mandatory field to the DDM request is missing (due to an
# 'ALCARECO' dataset from a Relval workflow or similar).
# Since this is expected to happen a lot, we'd better just
# log a warning and continue
msg = "Could not create DDMReq for Workflow: {}".format(workflow['RequestName'])
msg += "Error: {}".format(ex)
self.logger.warning(msg)
continue
ddmReqList.append(ddmRequest)
except Exception as ex:
msg = "Could not create DDMReq for Workflow: {}".format(workflow['RequestName'])
msg += "Error: {}".format(ex)
self.logger.exception(msg)
return workflow
try:
# In the message bellow we may want to put the list of datasets too
self.logger.info("Making transfer subscriptions for %s", workflow['RequestName'])
if ddmReqList:
ddmResultList = self.ddm.makeAggRequests(ddmReqList, aggKey='item')
else:
# NOTE:
# Nothing else to be done here. We mark the document as
# done so we do not iterate through it multiple times
msg = "Skip submissions for %s. Either all data Tiers were "
msg += "excluded or there were no Output Datasets | |
over time points, not samples
rand_ph_shuff = tf.transpose(
self.latent_rand_samples[:, 1:, :], perm=[1, 0, 2])
z_samples = tf.scan(
fn=lds_update,
elems=rand_ph_shuff,
initializer=z0_samples)
# concat across time (num_samples x num_time_pts x dim_latent)
self.z_samples_prior = tf.concat(
[tf.expand_dims(z0_samples, axis=1),
tf.transpose(z_samples, perm=[1, 0, 2])], axis=1)
def _sample_y(self):
    """Draw observation-space samples from the generative model, given
    the latent prior samples in ``self.z_samples_prior``; appends one
    tensor per population to ``self.y_samples_prior``.
    """
    # expand dims to account for time and mc dims when applying mapping
    # now (1 x num_samples x num_time_pts x dim_latent)
    z_samples_ex = tf.expand_dims(self.z_samples_prior, axis=0)
    y_means_ls = []  # contribution from latent space
    y_means_lp = []  # contribution from linear predictors
    y_means = []
    for pop, pop_dim in enumerate(self.dim_obs):
        # map this population's latent slice through its network
        y_means_ls.append(tf.squeeze(self.networks[pop].apply_network(
            z_samples_ex[:, :, :,
                         self.latent_indxs[pop][0]:
                         self.latent_indxs[pop][-1]]),
            axis=0))
        if self.num_clusters is not None:
            # mix network output over cluster assignment probabilities
            F = tf.expand_dims(y_means_ls[-1], axis=2)
            y_means_ls[-1] = tf.squeeze(tf.matmul(F, self.mark_probs))
        if self.dim_predictors is not None:
            # append new list for this population
            y_means_lp.append([])
            for pred, pred_dim in enumerate(self.dim_predictors):
                if self.predictor_indx[pop][pred] is not None:
                    net_out = self.networks_linear[pop][pred]. \
                        apply_network(self.linear_predictors_phs[pred])
                    y_means_lp[-1].append(net_out)
            # NOTE(review): tf.add_n raises on an empty list; this assumes
            # at least one predictor applies to every population - confirm.
            y_means.append(
                tf.add(y_means_ls[-1], tf.add_n(y_means_lp[-1])))
        else:
            y_means.append(y_means_ls[-1])
    # get random samples from observation space
    # BUG FIX: these comparisons used ``is`` with a string literal, which
    # tests object identity rather than equality and only works by
    # accident of CPython string interning (SyntaxWarning on Python 3.8+).
    if self.noise_dist == 'gaussian':
        obs_rand_samples = []
        for pop, pop_dim in enumerate(self.dim_obs):
            obs_rand_samples.append(tf.random_normal(
                shape=[self.num_samples_ph, self.num_time_pts, pop_dim],
                mean=0.0, stddev=1.0, dtype=self.dtype,
                name=str('obs_rand_samples_%02i' % pop)))
            # mean plus scaled standard-normal noise
            self.y_samples_prior.append(y_means[pop] + tf.multiply(
                obs_rand_samples[pop], self.R_sqrt[pop]))
    elif self.noise_dist == 'poisson':
        for pop, pop_dim in enumerate(self.dim_obs):
            self.y_samples_prior.append(tf.squeeze(tf.random_poisson(
                lam=y_means[pop], shape=[1], dtype=self.dtype), axis=0))
def log_density(self, y, z):
    r"""
    Evaluate log density for generative model, defined as
        p(y, z) = p(y | z) p(z)
    where
        p(z) = \prod_t p(z_t), z_t ~ N(A z_{t-1}, Q)
        p(y | z) = \prod_t p(y_t | z_t)
    Args:
        y (batch_size x num_mc_samples x num_time_pts x dim_obs tf.Tensor)
        z (batch_size x num_mc_samples x num_time_pts x dim_latent
            tf.Tensor)
    Returns:
        float: log density over y and z, averaged over minibatch samples
            and monte carlo samples
    """
    # likelihood term log p(y | z); also cached on the instance
    with tf.variable_scope('likelihood'):
        self.log_density_y = self._log_density_likelihood(y)
    # prior term log p(z); also cached on the instance
    with tf.variable_scope('prior'):
        self.log_density_z = self._log_density_prior(z)
    return self.log_density_y + self.log_density_z
def _log_density_likelihood(self, y):
log_density_y = []
for pop, pop_dim in enumerate(self.dim_obs):
with tf.variable_scope('population_%02i' % pop):
if self.noise_dist is 'gaussian':
# expand observation dims over mc samples
res_y = tf.expand_dims(y[pop], axis=1) - self.y_pred[pop]
# average over batch and mc sample dimensions
res_y_R_inv_res_y = tf.reduce_mean(
tf.multiply(tf.square(res_y), self.R_inv[pop]),
axis=[0, 1])
# sum over time and observation dimensions
test_like = tf.reduce_sum(res_y_R_inv_res_y)
tf.summary.scalar('log_joint_like', -0.5 * test_like)
# total term for likelihood
log_density_y.append(-0.5 * (test_like
+ self.num_time_pts * tf.reduce_sum(
tf.log(self.R[pop]))
+ self.num_time_pts * pop_dim * tf.log(2.0 * np.pi)))
elif self.noise_dist is 'poisson':
# expand observation dims over mc samples
obs_y = tf.expand_dims(y[pop], axis=1)
# average over batch and mc sample dimensions
log_density_ya = tf.reduce_mean(
tf.multiply(obs_y[pop], tf.log(self.y_pred[pop]))
- self.y_pred[pop]
- tf.lgamma(1 + obs_y[pop]),
axis=[0, 1])
# sum over time and observation dimensions
log_density_y.append(tf.reduce_sum(log_density_ya))
tf.summary.scalar('log_joint_like', log_density_y[-1])
else:
raise ValueError
return tf.add_n(log_density_y, name='log_joint_like_total')
    def _log_density_prior(self, z):
        """Evaluate log p(z) for the linear-Gaussian latent dynamics.

        Args:
            z (batch_size x num_mc_samples x num_time_pts x dim_latent
                tf.Tensor): latent samples

        Returns:
            tf.Tensor: scalar log density, averaged over minibatch and monte
                carlo samples, summed over time and latent dimensions
        """
        # residual of the initial state around its mean
        self.res_z0 = res_z0 = z[:, :, 0, :] - self.z0_mean
        # residual of each transition: z_t - A z_{t-1}
        self.res_z = res_z = z[:, :, 1:, :] - tf.tensordot(
            z[:, :, :-1, :], tf.transpose(self.A), axes=[[3], [0]])
        # average over batch and mc sample dimensions
        res_z_Q_inv_res_z = tf.reduce_mean(tf.multiply(
            tf.tensordot(res_z, self.Q_inv, axes=[[3], [0]]), res_z),
            axis=[0, 1])
        res_z0_Q0_inv_res_z0 = tf.reduce_mean(tf.multiply(
            tf.tensordot(res_z0, self.Q0_inv, axes=[[2], [0]]), res_z0),
            axis=[0, 1])
        # sum over time and latent dimensions
        test_prior = tf.reduce_sum(res_z_Q_inv_res_z)
        test_prior0 = tf.reduce_sum(res_z0_Q0_inv_res_z0)
        tf.summary.scalar('log_joint_prior', -0.5 * test_prior)
        tf.summary.scalar('log_joint_prior0', -0.5 * test_prior0)
        # total term for prior: quadratic forms plus log-determinants and
        # gaussian normalization constants
        log_density_z = -0.5 * (test_prior + test_prior0
            + (self.num_time_pts - 1) * tf.log(tf.matrix_determinant(self.Q))
            + tf.log(tf.matrix_determinant(self.Q0))
            + self.num_time_pts * sum(self.dim_latent) * tf.log(2.0 * np.pi))
        return log_density_z
def sample(self, sess, num_samples=1, seed=None, linear_predictors=None, mark_probs = None):
"""
Generate samples from the model
Args:
sess (tf.Session object)
num_samples (int, optional)
seed (int, optional)
linear_predictors (list)
Returns:
num_samples x num_time_pts x dim_obs x numpy array:
sample observations y
num_samples x num_time_pts x dim_latent numpy array:
sample latent states z
"""
if seed is not None:
tf.set_random_seed(seed)
if self.dim_predictors is not None and linear_predictors is None:
raise ValueError('must supply linear predictors for sampling')
if self.num_clusters is not None and mark_probs is None:
raise ValueError('must supply mark probabilities for sampling')
feed_dict = {self.num_samples_ph: num_samples}
if self.dim_predictors is not None:
for pred, pred_ph in enumerate(self.linear_predictors_phs):
feed_dict[pred_ph] = linear_predictors[pred]
if self.num_clusters is not None:
feed_dict[self.mark_probs] = mark_probs
[y, z] = sess.run(
[self.y_samples_prior, self.z_samples_prior],
feed_dict=feed_dict)
return y, z
def get_params(self, sess):
"""Get parameters of generative model"""
if self.noise_dist is 'gaussian':
A, R_sqrt, z0_mean, Q, Q0 = sess.run(
[self.A, self.R_sqrt, self.z0_mean, self.Q, self.Q0])
param_dict = {
'A': A, 'R': np.square(R_sqrt), 'z0_mean': z0_mean,
'Q': Q, 'Q0': Q0}
elif self.noise_dist is 'poisson':
A, z0_mean, Q, Q0 = sess.run(
[self.A, self.z0_mean, self.Q, self.Q0])
param_dict = {
'A': A, 'z0_mean': z0_mean, 'Q': Q, 'Q0': Q0}
else:
raise ValueError
return param_dict
def get_linear_params(self, sess):
"""Get parameters of linear regressors"""
param_dict = []
for pop, pop_dim in enumerate(self.dim_obs):
param_dict.append([])
for pred, pred_dim in enumerate(self.dim_predictors):
if self.predictor_indx[pop][pred] is not None:
layer_weights_ = sess.run(
self.networks_linear[pop][pred].layers[0].weights)
else:
layer_weights_ = []
param_dict[pop].append(layer_weights_)
return param_dict
class NetLDS(NetFLDS):
    """
    Generative model is defined as
        z_t ~ N(A z_{t-1}, Q)
        y_t^i ~ N(C_i z_t^i + d_i, R_i)
    for each population i, where the z_t^i are non-overlapping subsets of z_t
    """
    def __init__(
            self, dim_obs=None, dim_latent=None, linear_predictors=None,
            num_time_pts=None, gen_params=None, noise_dist='gaussian',
            post_z_samples=None, **kwargs):
        """
        Args:
            dim_obs (list): observation dimension for each population
            dim_latent (list): latent dimension for each population
            linear_predictors (dict):
                'dim_predictors' (list): dimension for each set of linear
                    predictors
                'predictor_indx' (list of lists): each element of the list
                    contains the indices of the predictors in the
                    `dim_predictors` list used by the corresponding population
            num_time_pts (int): number of time points per observation of the
                dynamical sequence
            gen_params (dict): dictionary of generative params for initializing
                model
            noise_dist (str): 'gaussian' | 'poisson'
            post_z_samples (batch_size x num_mc_samples x num_time_pts x
                dim_latent tf.Tensor): samples from the (appx) posterior of the
                latent states
        """
        if gen_params is None:
            gen_params = {}
        # iterate through populations
        # NOTE: must set kernel/bias initializers outside of this constructor
        # for now since NetFLDS assumes nn_params is the same for each pop
        for pop, _ in enumerate(dim_obs):
            # emissions matrix
            if 'C' in gen_params:
                # NOTE(review): self.dtype is read before super().__init__()
                # runs -- presumably dtype is a class-level attribute on
                # NetFLDS; confirm
                kernel_initializer = tf.constant_initializer(
                    gen_params['C'][pop], dtype=self.dtype)
            else:
                kernel_initializer = 'trunc_normal'
            # biases
            if 'd' in gen_params:
                bias_initializer = tf.constant_initializer(
                    gen_params['d'][pop], dtype=self.dtype)
            else:
                bias_initializer = 'zeros'
            # list of dicts specifying (linear) nn to observations
            # NOTE(review): nn_params is rebuilt on every iteration, so only
            # the LAST population's initializers reach super().__init__ --
            # consistent with the NOTE above, but confirm this is intended
            nn_params = [{
                'units': dim_obs[pop],
                'activation': 'linear',
                'kernel_initializer': kernel_initializer,
                'bias_initializer': bias_initializer,
                'kernel_regularizer': None,
                'bias_regularizer': None}]
        super().__init__(
            dim_obs=dim_obs, dim_latent=dim_latent, nn_params=nn_params,
            linear_predictors=linear_predictors, noise_dist=noise_dist,
            post_z_samples=post_z_samples, num_time_pts=num_time_pts,
            gen_params=gen_params)
    def get_params(self, sess):
        """Get parameters of generative model"""
        param_dict = super().get_params(sess)
        # per-population emission weights C and biases d live in the first
        # (linear) layer of each observation network
        param_dict['C'] = []
        param_dict['d'] = []
        for pop, pop_dim in enumerate(self.dim_obs):
            layer_weights = sess.run(self.networks[pop].layers[0].weights)
            param_dict['C'].append(layer_weights[0])
            param_dict['d'].append(layer_weights[1])
        return param_dict
class FLDS(NetFLDS):
    """
    Single-population fLDS:
        z_t ~ N(A z_{t-1}, Q)
        E[y_t] ~ f(z_t)
    Thin wrapper adapting the multi-population NetFLDS interface to one
    observed population.
    """
    def __init__(
            self, dim_obs=None, dim_latent=None, dim_predictors=None,
            num_time_pts=None, gen_params=None, noise_dist='gaussian',
            nn_params=None, post_z_samples=None, train_A = True, train_Q0 = True,
            num_clusters = None, **kwargs):
        """
        Args:
            dim_obs (int): observation dimension
            dim_latent (int): latent dimension
            dim_predictors (list): dimension for each set of linear predictors
            num_time_pts (int): number of time points per observation of the
                dynamical sequence
            gen_params (dict): dictionary of generative params for initializing
                model
            noise_dist (str): 'gaussian' | 'poisson'
            nn_params (list): dictionaries for building each layer of the
                mapping from the latent space to observations
            post_z_samples (batch_size x num_mc_samples x num_time_pts x
                dim_latent tf.Tensor): samples from the (appx) posterior of the
                latent states
        """
        # adapt the scalar population spec to the list-based NetFLDS interface
        if dim_predictors is None:
            linear_predictors = None
        else:
            linear_predictors = {
                'dim_predictors': dim_predictors,
                'predictor_indx': [range(len(dim_predictors))]}
            # forward optional per-predictor parameters when supplied
            if 'predictor_params' in kwargs:
                linear_predictors['predictor_params'] = \
                    [kwargs['predictor_params']]
        super().__init__(
            dim_obs=[dim_obs], dim_latent=[dim_latent],
            linear_predictors=linear_predictors,
            post_z_samples=post_z_samples, num_time_pts=num_time_pts,
            gen_params=gen_params, nn_params=nn_params, noise_dist=noise_dist,
            train_A = train_A, train_Q0 = train_Q0, num_clusters = num_clusters)
    def sample(self, sess, num_samples=1, seed=None, linear_predictors=None, mark_probs = None):
        """Sample (y, z); unwraps the one-element population list for y."""
        y_all, z_all = super().sample(
            sess, num_samples, seed, linear_predictors, mark_probs)
        return y_all[0], z_all
class LDS(NetFLDS):
"""
Generative model is defined as
z_t ~ N(A z_{t-1}, Q)
y_t ~ N(C z_t + d, R)
"""
def __init__(
self, dim_obs=None, dim_latent=None, dim_predictors=None,
num_time_pts=None, gen_params=None, noise_dist='gaussian',
post_z_samples=None, **kwargs):
"""
Args:
dim_obs (int): observation dimension
dim_latent (int): latent dimension
dim_predictors (list): dimension for each set of linear predictors
num_time_pts (int): number of time | |
"""
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib.patches import FancyArrowPatch, Circle, Ellipse
from matplotlib.colors import ListedColormap
import matplotlib.ticker as ticky
import matplotlib.colors as mcolors
import numpy as np
maxfontsize= 22
#HELPERS
def preserve_eps_text():
    """Keep exported text as editable text instead of vector paths.

    NOTE(review): despite the name, this sets `svg.fonttype`, so it affects
    SVG export -- confirm whether EPS was also intended.
    """
    plt.rcParams['svg.fonttype'] = 'none'
def extr_param(name, cont, i= -1):
    """Fetch `name` from dict `cont`.

    If the stored value is a list and a non-negative index `i` was given,
    return element `i`; otherwise return the value as-is. Missing keys
    yield None.
    """
    if name not in cont:
        return None
    value = cont[name]
    if isinstance(value, list) and i >= 0:
        return value[i]
    return value
def setup_grid(plt, majorcolor= '#404040', minorcolor= '#595959'):
    """Enable minor ticks and configure grid styling on the given pyplot.

    Args:
        plt: the matplotlib.pyplot module (parameter shadows the file-level
            import on purpose).
        majorcolor: color for the major grid lines.
        minorcolor: color for the minor grid lines.
    """
    plt.minorticks_on()
    # NOTE(review): b=False turns the grid OFF while still passing style
    # kwargs; also the `b` keyword was renamed to `visible` and removed in
    # matplotlib 3.6 -- confirm the targeted matplotlib version and whether
    # the grid is meant to be hidden here.
    plt.grid(b=False, color= majorcolor, linestyle='-', linewidth=0.8)
    plt.grid(b=False, which='minor', color= minorcolor, linestyle='-', alpha=0.2)
    #plt.locator_params(axis='x', nbins=15)
def format_axis_E(axisobj):
    """Format major tick labels in scientific notation, e.g. '1.2E+03'."""
    formatter = ticky.FormatStrFormatter('%.1E')
    axisobj.set_major_formatter(formatter)
def D_frm(x, pos):
    """Tick formatter: value truncated to a plain integer."""
    return f"{int(x)}"
def E_frm(x, pos):
    """Tick formatter: scientific notation with one decimal."""
    return f"{x:.1E}"
def d3_frm(x, pos):
    """Tick formatter: value scaled by 1e3, truncated to an integer."""
    return f"{int(x * 1e3)}"
def m3_frm(x, pos):
    """Tick formatter: value scaled by 1e3, one decimal."""
    return f"{x * 1e3:.1f}"
def mn20_frm(x, pos):
    """Tick formatter: value scaled by 1e-20, one decimal."""
    return f"{x * 1e-20:.1f}"
def m3_2f_frm(x, pos):
    """Tick formatter: value scaled by 1e3, two decimals."""
    return f"{x * 1e3:.2f}"
def m0_3f_frm(x, pos):
    """Tick formatter: unscaled value, three decimals."""
    return f"{x:.3f}"
def m0_frm(x, pos):
    """Tick formatter: unscaled value, one decimal."""
    return f"{x:.1f}"
def m02_frm(x, pos):
    """Tick formatter: unscaled value, two decimals."""
    return f"{x:.2f}"
def format_axis_func(axisobj, frmfunc):
    """Attach a custom (value, pos) -> str tick formatter to an axis."""
    wrapper = ticky.FuncFormatter(frmfunc)
    axisobj.set_major_formatter(wrapper)
def disable_xaxis(subplt, lblbottom= False):
    """Hide x-axis ticks on `subplt`, optionally keeping bottom labels."""
    # turn off both major and minor ticks on the top and bottom edges
    subplt.tick_params(
        axis='x',
        which='both',
        bottom=False,
        top=False,
        labelbottom= lblbottom,
    )
#frame snapshot plots
"""
def make_colormap(colormap_filename):
rgb_colours = extract_colmap_colors(colormap_filename)
return ListedColormap(rgb_colours[::-1])
"""
#xaxis format [data, label, lims, frmfunc, inv]
#xscale can be: linear, log, symlog, logit
def x_(data, label, xlow= None, xhigh= None, frmfunc= None, xticks= None, xscale= "linear"):
    """Build the x-axis descriptor dict consumed by add_plot."""
    return {
        "d": data,
        "lab": label,
        "lims": [xlow, xhigh],
        "frmfunc": frmfunc,
        "inv": False,  # inversion flag; always off here
        "xticks": xticks,
        "scal": xscale,
    }
#yaxis format [data, label, legend, lims, markers, style, frmfunc, col, lthk]
def y_(data, label, legend= None, lthk= 2, color= None, ylow= None, yhigh= None, noticks= False, styl= None, marker= None, frmfunc= None, yscale= "linear"):
    """Build a y-axis/series descriptor dict consumed by add_plot."""
    return {
        "d": data,
        "lab": label,
        "leg": legend,
        "lims": [ylow, yhigh],
        "noticks": noticks,
        "markers": marker,
        # default to a solid line when no explicit style is given
        "style": "-" if styl is None else styl,
        "frmfunc": frmfunc,
        "col": color,
        "lthk": lthk,
        "scal": yscale,
    }
def colorz_(data, label= None, cmapname= "rainbow", zlow= None, zhigh= None, frmfunc= None, docontour= False):
    """Build a color-surface (z) descriptor dict."""
    return {
        "d": data,
        "lab": label,
        "lims": [zlow, zhigh],
        "cmapname": cmapname,
        "frmfunc": frmfunc,
        "docont": docontour,
    }
def arrowz_(datax, datay, label= None, qsett= None, zlow= None, zhigh= None, frmfunc= None):
    """Build a quiver (arrow-field) descriptor dict.

    NOTE(review): zlow/zhigh are accepted but not stored in the returned
    dict -- confirm whether limits were meant to be forwarded.
    """
    descriptor = {
        "d": (datax, datay),
        "lab": label,
        "sett": qsett,
        "frmfunc": frmfunc,
    }
    return descriptor
def quiverkeyp_(X= 0.46, Y= 0.9, U= 5, label= 'scale= 5 m/s', labelpos= 'E'):
    """Settings dict for a quiver key (the reference arrow + caption)."""
    settings = dict(X=X, Y=Y, U=U, label=label, labelpos=labelpos)
    return settings
def quiverp_(
    width= 1.6e-2, scale= 23.2, linewidth= 0.8, minshaft= 1.2
    , edgecolor= "black", facecolor= "white", qksett= None
):
    """Settings dict for quiver arrows; `qksett` optionally carries the
    quiver-key settings built by quiverkeyp_()."""
    return dict(
        width=width, scale=scale, linewidth=linewidth, minshaft=minshaft,
        edgecolor=edgecolor, facecolor=facecolor, qksett=qksett)
def gen_fig(N= 1, M= 1, dims= (12, 10)):#N is for i, M is for j
    """Create an N-column by M-row figure.

    Args:
        N: number of subplot columns.
        M: number of subplot rows.
        dims: (width, height) of the figure in inches; any 2-sequence works.

    Returns:
        [fig, axs]: the figure and its axes object/array.
    """
    # tuple default avoids the shared-mutable-default pitfall of dims=[12, 10]
    fig, axs = plt.subplots(ncols= N, nrows= M, figsize=(dims[0], dims[1])) # single figure setup)
    return [fig, axs]
def fig_ax_(figobj, i= 0, j= 0):
    """Select subplot (i, j) from a [fig, axs] pair returned by gen_fig.

    plt.subplots yields a bare Axes, a 1-D array, or a 2-D array depending
    on (N, M); the hasattr(.., "size") checks (numpy arrays expose .size)
    index only as deep as the array nesting allows.
    """
    fig_ = figobj[0]
    candidate = figobj[1]
    for index in (i, j):
        if hasattr(candidate, "size"):
            candidate = candidate[index]
    return [fig_, candidate]
def pargs_(xdata, ydata, zdata= None, maxfntsize= maxfontsize, title= None, legpos= None, aspect= "auto", nogrid= False):
    """Bundle plot data and figure properties for add_plot-style consumers."""
    figprops = {
        "max_fontsize": maxfntsize,
        "title": title,
        "legpos": legpos,
        "aspect": aspect,
        "nogrid": nogrid,
    }
    return [[xdata, ydata, zdata], figprops]
def add_text(ax, x, y, str_, fntsize= maxfontsize*0.9):
    """Draw boxed, centered text at axes-fraction coords (x, y) of figax `ax`."""
    box_style = dict(boxstyle='round', facecolor='white', alpha=0.9)
    # NOTE(review): draws via plt.text (current axes) while using ax[1]'s
    # transform -- assumes ax[1] is also the current axes; confirm.
    plt.text(
        x, y, str_,
        horizontalalignment='center', verticalalignment='center',
        transform = ax[1].transAxes, fontsize= fntsize, bbox=box_style)
def fig_plot(figobj_, filename= None, mtop= None, mright= None, mbottom= None, mleft= None, wspace= None, hspace= None):
    """Finalize a figure: adjust margins, show it (no filename) or save it
    to `filename`, then release the figure's resources."""
    fig_ = figobj_[0]
    plt.subplots_adjust(
        top= mtop, left= mleft, right= mright, bottom= mbottom,
        hspace= hspace, wspace= wspace)
    if filename is not None:
        plt.savefig(filename, bbox_inches='tight')
    else:
        plt.show()
    fig_.clear()
    plt.close(fig_)
def add_errorbars(figax, xdata, ydata, yerrdata, capsize_= 10, elinewidth_= 3, ecolor_= 'black', barswidth_= 2):
    """Overlay vertical error bars (no connecting line) on figax[1]."""
    _, caps, _ = figax[1].errorbar(
        xdata, ydata, yerr= yerrdata, ls='none',
        capsize=capsize_, elinewidth=elinewidth_, ecolor= ecolor_)
    # thicken each cap stroke to the requested width
    for cap in caps:
        cap.set_markeredgewidth(barswidth_)
def add_legend(figobj_, maxfntsize, legloc= "upper center", ncols= 1, figpos= (0.5, 0.95)):
    """Add a figure-level legend.

    Args:
        figobj_: [fig, axs] pair from gen_fig.
        maxfntsize: base font size; the legend uses 60% of it.
        legloc: legend anchor location string.
        ncols: number of legend columns.
        figpos: (x, y) anchor in figure coordinates; any 2-sequence works.
    """
    # tuple default avoids sharing a mutable default list between calls
    fig_= figobj_[0]
    fig_.legend(
        loc= legloc, fontsize= maxfntsize*0.6
        , ncol= ncols
        , fancybox=True
        , bbox_transform=fig_.transFigure
        , bbox_to_anchor=(figpos[0], figpos[1])
    )
def add_title(figobj_, title, maxfntsize):
    """Set the figure's suptitle at the full base font size."""
    figobj_[0].suptitle(title, fontsize= maxfntsize)
def add_plot(args, fig):
    """Render one (possibly multi-y-axis) line plot onto a subplot.

    Args:
        args: [data, figprops] pair as built by pargs_(); data is
            [xvar, yvars, zdata] where xvar comes from x_() and yvars is a
            list of y_() descriptors (each extra descriptor gets a twinx
            axis).
        fig: [fig, ax] pair as returned by fig_ax_().
    """
    data= args[0]
    figprops= args[1]
    fig_= fig[0]
    ax= fig[1]
    #figprops
    fsize_max= figprops["max_fontsize"]
    #set title
    if ("title" in figprops):
        ax.set_title(figprops["title"], size= fsize_max)
    #fig rescaling
    if ("aspect" in figprops):
        ax.set_aspect(figprops["aspect"])
    #fig_.subplots_adjust(right=0.80)
    #extract data
    xvar= data[0]
    yvars= data[1]
    #plot data
    axi= 0
    ##axref= [0]*len(yvars)
    axref= ax
    axs= [None]*len(yvars)
    plts_= [None]*len(yvars)
    # local helper shadows the module-level extr_param: default index is 0
    # (not -1) and list values are ALWAYS indexed, so per-series styling
    # lists pick the k-th entry
    def extr_param(name, cont, i= 0):
        param_= None
        if(name in cont):
            param_= cont[name][i] if (isinstance(cont[name], list)) else cont[name]
        return param_
    for yvar in yvars:
        # per-axis x data: either one shared array or a list indexed by axis
        xvar_= xvar["d"][axi] if isinstance(xvar["d"], list) and len(xvar["d"])> axi else xvar["d"]
        if(isinstance(yvar["d"], list)):
            # multiple series on this axis
            k_= 0
            plts_[axi]= []
            for yvar__ in yvar["d"]:
                xvar__= xvar_[k_] if isinstance(xvar_, list) and len(xvar_)> k_ else xvar_
                col_= extr_param("col", yvar, k_)
                styl_= extr_param("style", yvar, k_)
                mark_= extr_param("markers", yvar, k_)
                lthk_= extr_param("lthk", yvar, k_)
                leg_= extr_param("leg", yvar, k_)
                plts_[axi].append(
                    axref.plot(
                        xvar__
                        , yvar__
                        , label= leg_
                        , linewidth= lthk_
                        , color= col_
                        , linestyle= styl_
                        , marker= mark_
                    )
                )
                k_= k_+1
        else:
            # single series on this axis
            col_= extr_param("col", yvar)
            styl_= extr_param("style", yvar)
            mark_= extr_param("markers", yvar)
            lthk_= extr_param("lthk", yvar)
            leg_= extr_param("leg", yvar)
            plts_[axi] = axref.plot(
                xvar_
                , yvar["d"]
                , label= leg_
                , linewidth= lthk_
                , color= col_
                , linestyle= styl_
                , marker= mark_
            )
        #set y scale
        if("scal" in yvar and yvar["scal"] is not None):
            axref.set_yscale(yvar["scal"])
        #set y label & limit
        if(axi> 0):
            axref.set_ylabel(yvar["lab"], size= fsize_max*0.9, rotation=0)#, rotation=0 if the label needs to be horizontal
        else:
            axref.set_ylabel(yvar["lab"], size= fsize_max*0.9)
        if("lims" in yvar and yvar["lims"] is not None):
            axref.set_ylim(yvar["lims"])
        #format y axis
        axref.tick_params(axis="both", labelsize= fsize_max*0.8)
        if("frmfunc" in yvar and yvar["frmfunc"] is not None):
            format_axis_func(axref.yaxis, yvar["frmfunc"])
        #set x scale what?
        if("scal" in xvar and xvar["scal"] is not None):
            axref.set_xscale(xvar["scal"])
        if(axi> 0):
            # secondary axes: hide their x ticks and push the spine outward
            disable_xaxis(axref)
            axref.patch.set_visible(False)
            axref.yaxis.set_label_coords(1.0 + 0.23*(axi-1), 1.06)
            axref.spines["right"].set_position(("axes", 1.0 + 0.23*(axi-1)))
            #make_patch_spines_invisible(ax)
        #setup axis grid
        #setup_grid(axref)
        #rem axis
        axs[axi]= axref
        #clone axis
        axi= axi+1
        if(axi== len(yvars)):
            break
        axref= ax.twinx()
    if("nogrid" not in figprops or figprops["nogrid"] == False):
        setup_grid(ax)
    #set x label & limit
    ax.set_xlabel(xvar["lab"], size= fsize_max*0.9)
    if("lims" in xvar and xvar["lims"] is not None):
        ax.set_xlim(xvar["lims"])
    if("xticks" in xvar and xvar["xticks"] is not None):
        plt.xticks(ticks= xvar["xticks"][0], labels= xvar["xticks"][1], fontsize= fsize_max*0.8)
    #format x axis
    if("frmfunc" in xvar and xvar["frmfunc"] is not None):
        format_axis_func(ax.xaxis, xvar["frmfunc"])
    if ("legpos" in figprops and figprops["legpos"] is not None):
        legpos_= figprops["legpos"]
        if(isinstance(legpos_, list) and len(legpos_) == len(axs)):
            # NOTE(review): after the plotting loop, axi == len(yvars), so
            # plts_[axi] below is an IndexError whenever this branch runs --
            # probably meant plts_[j_]; confirm and fix.
            # NOTE(review): j_ is reset to 0 on every iteration of this loop,
            # so every axis would use legpos_[0]; the reset likely belongs
            # before the loop.
            for ax_ in axs:
                j_= 0
                if(isinstance(plts_[axi], list)):
                    lns= []
                    labs= []*len(plts_)
                    for plt_ in plts_:
                        if(isinstance(plt_, list) and len(plt_)> 1):
                            for subplt_ in plt_:
                                lns= lns+ subplt_
                        else:
                            lns= lns+ plt_
                    labs= [l.get_label() for l in lns]
                    ax_.legend(lns, labs,
                        loc= legpos_[j_], fontsize= fsize_max*0.7
                        , fancybox=True
                    )
                else:
                    ax_.legend(
                        loc= legpos_[j_], fontsize= fsize_max*0.7
                        , fancybox=True
                    )
                j_= j_+1
        else:
            #aggregate plots for common legend
            # (note: []*n is always [], the labs pre-size is a no-op)
            lns= []
            labs= []*len(plts_)
            for plt_ in plts_:
                if(isinstance(plt_, list) and len(plt_)> 1):
                    for subplt_ in plt_:
                        lns= lns+ subplt_
                else:
                    lns= lns+ plt_
            labs= [l.get_label() for l in lns]
            ax.legend(lns, labs,
                loc= figprops["legpos"], fontsize= fsize_max*0.7
                , fancybox=True
            )
#2D plots
def add_colorsurface_plot(args, fig):
data= args[0]
figprops= args[1]
fig_= fig[0]
ax= fig[1]
#figprops
fsize_max= extr_param("max_fontsize", figprops)
if(fsize_max is None): fsize_max= maxfontsize
#set title
title_= extr_param("title", figprops)
if (title_ is not None): ax.set_title(figprops["title"], size= fsize_max*0.9, y=1.02)
#fig rescaling
aspect_= extr_param("aspect", figprops)
if ("aspect" in figprops): ax.set_aspect(figprops["aspect"])
#fig_.subplots_adjust(right=0.80)
#extract data
xvar= data[0]#[0] | |
from __future__ import division
import sys
import os
import random
import argparse
from pathlib import Path
import torch
from torch.utils import data
import numpy as np
from tqdm import tqdm
from models import get_model
from datasets import get_dataset
from loss import *
from utils import *
from buffer import createBuffer
import time
def train(args):
    """Continually train a scene-coordinate regression network over a
    sequence of scenes, replaying buffered samples from earlier scenes.

    Args:
        args: parsed command-line namespace (see the __main__ block).
    """
    # prepare datasets
    if args.dataset == 'i19S':
        # i19S chains the 7-Scenes and 12-Scenes dataset classes
        datasetSs = get_dataset('7S')
        datasetTs = get_dataset('12S')
    else:
        if args.dataset in ['7S', 'i7S']:
            dataset_get = get_dataset('7S')
        if args.dataset in ['12S', 'i12S']:
            dataset_get = get_dataset('12S')
    # loss
    reg_loss = EuclideanLoss()
    if args.model == 'hscnet':
        cls_loss = CELoss()
    # loss weights: (cls level 1, cls level 2, coordinate regression)
    if args.dataset in ['i7S', 'i12S', 'i19S']:
        w1, w2, w3 = 1, 1, 100000
    else:
        w1, w2, w3 = 1, 1, 10
    # prepare model and optimizer
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = get_model(args.model, args.dataset)
    model.init_weights()
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.init_lr, eps=1e-8,
                                 betas=(0.9, 0.999))
    # resume from existing or start a new session
    if args.resume is not None:
        if os.path.isfile(args.resume):
            print("Loading model and optimizer from checkpoint '{}'".format\
                (args.resume))
            checkpoint = torch.load(args.resume, map_location=device)
            model.load_state_dict(checkpoint['model_state'])
            optimizer.load_state_dict(checkpoint['optimizer_state'])
            print("Loaded checkpoint '{}' (epoch{})".format(args.resume,
                checkpoint['epoch']))
            save_path = Path(args.resume)
            args.save_path = save_path.parent
            #start_epoch = checkpoint['epoch'] + 1
        else:
            print("No checkpoint found at '{}'".format(args.resume))
            sys.exit()
    else:
        if args.dataset in ['i7S', 'i12S', 'i19S']:
            model_id = "{}-{}-{}-initlr{}-iters{}-bsize{}-aug{}-{}".format(\
                args.exp_name, args.dataset, args.model, args.init_lr, args.n_iter,
                args.batch_size, int(args.aug), args.train_id)
        else:
            # bug fix: nine values are formatted here, so the template needs
            # nine placeholders -- the scene slot was missing, which shifted
            # every following field and silently dropped train_id
            model_id = "{}-{}-{}-{}-initlr{}-iters{}-bsize{}-aug{}-{}".format(\
                args.exp_name, args.dataset, args.scene.replace('/','.'),
                args.model, args.init_lr, args.n_iter, args.batch_size,
                int(args.aug), args.train_id)
        save_path = Path(model_id)
        args.save_path = 'checkpoints'/save_path
        args.save_path.mkdir(parents=True, exist_ok=True)
    start_epoch = 1
    # Continual learning over scenes
    buffer = createBuffer(data_path=args.data_path, exp=args.exp_name, buffer_size=args.buffer_size, dataset= args.dataset)
    if args.dataset == 'i7S':
        scenes = ['chess', 'fire', 'heads', 'office', 'pumpkin', 'redkitchen', 'stairs']
    if args.dataset == 'i12S':
        scenes = ['apt1/kitchen','apt1/living','apt2/bed',
                  'apt2/kitchen','apt2/living','apt2/luke','office1/gates362',
                  'office1/gates381','office1/lounge','office1/manolis',
                  'office2/5a','office2/5b']
    if args.dataset == 'i19S':
        scenes = ['chess', 'fire', 'heads', 'office', 'pumpkin', 'redkitchen', 'stairs', 'apt1/kitchen','apt1/living','apt2/bed',
                  'apt2/kitchen','apt2/living','apt2/luke','office1/gates362',
                  'office1/gates381','office1/lounge','office1/manolis',
                  'office2/5a','office2/5b']
    for i,scene in enumerate(scenes):
        # build per-scene loaders; the replay buffer is active from the
        # second scene onwards (i > 0)
        if args.dataset in ['i7S', 'i12S']:
            if i > 0:
                dataset = dataset_get(args.data_path, args.dataset, args.scene, split='train_{}'.format(scene),
                    model=args.model, aug=args.aug, Buffer=True, dense_pred_flag=args.dense_pred, exp=args.exp_name)
            else:
                dataset = dataset_get(args.data_path, args.dataset, args.scene, split='train_{}'.format(scene),
                    model=args.model, aug=args.aug, Buffer=False, exp=args.exp_name)
            trainloader = data.DataLoader(dataset, batch_size=args.batch_size,
                                          num_workers=4, shuffle=True)
            # un-augmented pass over the same scene, used to populate the buffer
            buffer_dataset = dataset_get(args.data_path, args.dataset, args.scene, split='train_{}'.format(scene),
                model=args.model, aug=False, Buffer=False, dense_pred_flag=args.dense_pred, exp=args.exp_name)
            buffer_trainloader = data.DataLoader(buffer_dataset, batch_size=args.batch_size, num_workers=4, shuffle=True)
        if args.dataset == 'i19S':
            # scenes 0-6 come from 7-Scenes, 7+ from 12-Scenes
            if i == 0:
                dataset = datasetSs(args.data_path, args.dataset, args.scene, split='train_{}'.format(scene),
                    model=args.model, aug=args.aug, Buffer=False, exp=args.exp_name)
                buffer_dataset = datasetSs(args.data_path, args.dataset, args.scene, split='train_{}'.format(scene),
                    model=args.model, aug=False, Buffer=False, dense_pred_flag=args.dense_pred, exp=args.exp_name)
            if i >0 and i < 7:
                dataset = datasetSs(args.data_path, args.dataset, args.scene, split='train_{}'.format(scene),
                    model=args.model, aug=args.aug, Buffer=True, dense_pred_flag=args.dense_pred, exp=args.exp_name)
                buffer_dataset = datasetSs(args.data_path, args.dataset, args.scene, split='train_{}'.format(scene),
                    model=args.model, aug=False, Buffer=False, dense_pred_flag=args.dense_pred, exp=args.exp_name)
            if i >= 7:
                dataset = datasetTs(args.data_path, args.dataset, args.scene, split='train_{}'.format(scene),
                    model=args.model, aug=args.aug, Buffer=True, dense_pred_flag=args.dense_pred, exp=args.exp_name)
                buffer_dataset = datasetTs(args.data_path, args.dataset, args.scene, split='train_{}'.format(scene),
                    model=args.model, aug=False, Buffer=False, dense_pred_flag=args.dense_pred, exp=args.exp_name)
            trainloader = data.DataLoader(dataset, batch_size=args.batch_size, num_workers=4, shuffle=True)
            buffer_trainloader = data.DataLoader(buffer_dataset, batch_size=args.batch_size, num_workers=4, shuffle=True)
        # start training
        args.n_epoch = int(np.ceil(args.n_iter * args.batch_size / len(dataset)))
        #for epoch in range(start_epoch, start_epoch + args.n_epoch+1):
        for epoch in range(1, args.n_epoch+1):
            # NOTE(review): lr stays constant (init_lr); it is only logged,
            # the optimizer's learning rate is never rescheduled
            lr = args.init_lr
            model.train()
            train_loss_list = []
            coord_loss_list = []
            if args.model == 'hscnet':
                lbl_1_loss_list = []
                lbl_2_loss_list = []
            # NOTE(review): assumes the loader yields (data_ori, data_buffer)
            # pairs even when built with Buffer=False -- confirm against the
            # dataset implementation
            for _, (data_ori, data_buffer) in enumerate(tqdm(trainloader)):
                img, coord, mask, lbl_1, lbl_2, lbl_1_oh, lbl_2_oh, _ = data_ori
                if mask.sum() == 0:
                    continue
                optimizer.zero_grad()
                img = img.to(device)
                coord = coord.to(device)
                mask = mask.to(device)
                train_loss, coord_loss, lbl_1_loss, lbl_2_loss = loss(img, coord, mask, lbl_1, lbl_2, lbl_1_oh,
                    lbl_2_oh, model, reg_loss, cls_loss, device, w1, w2, w3)
                # compute loss for buffer if not first scene
                if i > 0 :
                    # sample a random minibatch from buffer dataloader
                    img_buff, coord_buff, mask_buff, lbl_1_buff, lbl_2_buff, lbl_1_oh_buff, lbl_2_oh_buff, _, dense_pred = data_buffer
                    if mask_buff.sum() == 0:
                        continue
                    img_buff = img_buff.to(device)
                    coord_buff = coord_buff.to(device)
                    mask_buff = mask_buff.to(device)
                    buff_loss = loss_buff_DK(img_buff, coord_buff, mask_buff, lbl_1_buff, lbl_2_buff, lbl_1_oh_buff,
                        lbl_2_oh_buff, model, reg_loss, cls_loss, device, w1, w2, w3, dense_pred=dense_pred)
                    train_loss+= 1 * buff_loss
                coord_loss_list.append(coord_loss.item())
                if args.model == 'hscnet':
                    lbl_1_loss_list.append(lbl_1_loss.item())
                    lbl_2_loss_list.append(lbl_2_loss.item())
                train_loss_list.append(train_loss.item())
                train_loss.backward()
                optimizer.step()
            with open(args.save_path/args.log_summary, 'a') as logfile:
                if args.model == 'hscnet':
                    logtt = 'task {}:Epoch {}/{} - lr: {} - reg_loss: {} - cls_loss_1: {}' \
                            ' - cls_loss_2: {} - train_loss: {} '.format(scene,
                            epoch, args.n_epoch, lr, np.mean(coord_loss_list),
                            np.mean(lbl_1_loss_list), np.mean(lbl_2_loss_list),
                            np.mean(train_loss_list))
                else:
                    logtt = 'Epoch {}/{} - lr: {} - reg_loss: {} - train_loss: {}' \
                            '\n'.format(
                            epoch, args.n_epoch, lr, np.mean(coord_loss_list),
                            np.mean(train_loss_list))
                print(logtt)
                logfile.write(logtt)
            # checkpoint at the final epoch (n_epoch // 1 == n_epoch)
            if epoch % int(np.floor(args.n_epoch / 1.)) == 0:
                save_state(args.save_path, epoch, model, optimizer)
            #start_epoch = epoch
        # add buffer data
        with torch.no_grad():
            # loop variable renamed from `i`, which shadowed the scene index
            # used by the `i > 0` replay check above; the batch index is what
            # gets forwarded to the buffer as `nc`
            for batch_idx, (data_ori, data_buffer) in enumerate(tqdm(buffer_trainloader)):
                img, coord, mask, lbl_1, lbl_2, lbl_1_oh, lbl_2_oh, frame = data_ori
                if mask.sum() == 0:
                    continue
                optimizer.zero_grad()
                img = img.to(device)
                coord = coord.to(device)
                mask = mask.to(device)
                if args.dense_pred:
                    # store dense predictions alongside the frames
                    lbl_1 = lbl_1.to(device)
                    lbl_2 = lbl_2.to(device)
                    lbl_1_oh = lbl_1_oh.to(device)
                    lbl_2_oh = lbl_2_oh.to(device)
                    coord_pred, lbl_2_pred, lbl_1_pred = model(img, lbl_1_oh,
                                                               lbl_2_oh)
                    preds = (coord_pred, lbl_1_pred, lbl_2_pred)
                    if args.sampling == 'CoverageS':
                        buffer.add_bal_buff(frame, preds, batch_idx)
                    if args.sampling == 'Imgbal':
                        buffer.add_imb_buffer(frame, preds, batch_idx)
                    if args.sampling == 'Random':
                        buffer.add_buffer_dense(frame, preds)
                else:
                    if args.sampling == 'CoverageS':
                        buffer.add_bal_buff(frame, nc=batch_idx)
                    if args.sampling == 'Imgbal':
                        buffer.add_imb_buffer(frame, nc=batch_idx)
                    if args.sampling == 'Random':
                        buffer.add_buffer_dense(frame)
        save_state(args.save_path, epoch, model, optimizer)
def loss(img, coord, mask, lbl_1, lbl_2, lbl_1_oh, lbl_2_oh, model, reg_loss, cls_loss, device, w1, w2, w3, dense_pred=None):
    """Forward pass plus weighted hierarchical loss on ground-truth labels.

    Returns:
        (train_loss, coord_loss, lbl_1_loss, lbl_2_loss); `dense_pred` is
        accepted for signature parity with the buffer losses but unused.
    """
    lbl_1, lbl_2 = lbl_1.to(device), lbl_2.to(device)
    lbl_1_oh, lbl_2_oh = lbl_1_oh.to(device), lbl_2_oh.to(device)
    coord_pred, lbl_2_pred, lbl_1_pred = model(img, lbl_1_oh, lbl_2_oh)
    lbl_1_loss = cls_loss(lbl_1_pred, lbl_1, mask)
    lbl_2_loss = cls_loss(lbl_2_pred, lbl_2, mask)
    coord_loss = reg_loss(coord_pred, coord, mask)
    total = w3 * coord_loss + w1 * lbl_1_loss + w2 * lbl_2_loss
    return total, coord_loss, lbl_1_loss, lbl_2_loss
def loss_buff(img, coord, mask, lbl_1, lbl_2, lbl_1_oh, lbl_2_oh, model, reg_loss, cls_loss, device, w1, w2, w3, dense_pred=None):
    """Loss on replay-buffer samples: ground-truth loss plus (optionally)
    an L2 distillation term against the stored dense predictions."""
    lbl_1, lbl_2 = lbl_1.to(device), lbl_2.to(device)
    lbl_1_oh, lbl_2_oh = lbl_1_oh.to(device), lbl_2_oh.to(device)
    coord_pred, lbl_2_pred, lbl_1_pred = model(img, lbl_1_oh, lbl_2_oh)
    lbl_1_loss = cls_loss(lbl_1_pred, lbl_1, mask)
    lbl_2_loss = cls_loss(lbl_2_pred, lbl_2, mask)
    coord_loss = reg_loss(coord_pred, coord, mask)
    train_loss = w3 * coord_loss + w1 * lbl_1_loss + w2 * lbl_2_loss
    if dense_pred:
        # distillation: pull current predictions toward the stored (teacher)
        # dense predictions, with half the ground-truth weighting
        teacher_lbl_1 = dense_pred[0].to(device)
        teacher_lbl_2 = dense_pred[1].to(device)
        teacher_coord = dense_pred[2].to(device)
        mse = nn.MSELoss()
        train_loss += 0.5 * (
            w1 * mse(lbl_1_pred, teacher_lbl_1)
            + w2 * mse(lbl_2_pred, teacher_lbl_2)
            + w3 * mse(coord_pred, teacher_coord))
    return train_loss
def loss_buff_DK(img, coord, mask, lbl_1, lbl_2, lbl_1_oh, lbl_2_oh, model, reg_loss, cls_loss, device, w1, w2, w3, dense_pred=None):
    """Buffer loss with distillation gated by teacher quality.

    The stored (teacher) predictions act as an upper bound: the coordinate
    distillation term is only added when the current (student) model does
    worse than the teacher on the ground-truth coordinates.
    """
    lbl_1, lbl_2 = lbl_1.to(device), lbl_2.to(device)
    lbl_1_oh, lbl_2_oh = lbl_1_oh.to(device), lbl_2_oh.to(device)
    coord_pred, lbl_2_pred, lbl_1_pred = model(img, lbl_1_oh, lbl_2_oh)
    lbl_1_loss = cls_loss(lbl_1_pred, lbl_1, mask)
    lbl_2_loss = cls_loss(lbl_2_pred, lbl_2, mask)
    coord_loss = reg_loss(coord_pred, coord, mask)
    gt_loss = w3 * coord_loss + w1 * lbl_1_loss + w2 * lbl_2_loss
    if not dense_pred:
        # no stored predictions: plain ground-truth loss
        return gt_loss
    train_loss = 0.5 * gt_loss
    teacher_lbl_1 = dense_pred[0].to(device)
    teacher_lbl_2 = dense_pred[1].to(device)
    teacher_coord = dense_pred[2].to(device)
    mse = nn.MSELoss()
    buff_lbl_1_loss = mse(lbl_1_pred, teacher_lbl_1)
    buff_lbl_2_loss = mse(lbl_2_pred, teacher_lbl_2)
    buff_coord_loss = mse(coord_pred, teacher_coord)
    # compare teacher and student against the ground-truth coordinates
    teacher_gt_loss = reg_loss(teacher_coord, coord, mask)
    student_gt_loss = reg_loss(coord_pred, coord, mask)
    if student_gt_loss > teacher_gt_loss:
        # student is worse than the teacher: distill coordinates as well
        train_loss += 0.5 * (w1 * buff_lbl_1_loss + w2 * buff_lbl_2_loss + w3 * buff_coord_loss)
    else:
        train_loss += 0.5 * (w1 * buff_lbl_1_loss + w2 * buff_lbl_2_loss)
    return train_loss
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Hscnet")
parser.add_argument('--model', nargs='?', type=str, default='hscnet',
choices=('hscnet', 'scrnet'),
help='Model to use [\'hscnet, scrnet\']')
parser.add_argument('--dataset', nargs='?', type=str, default='7S',
choices=('7S', '12S', 'i7S', 'i12S', 'i19S',
'Cambridge'), help='Dataset to use')
parser.add_argument('--scene', nargs='?', type=str, default='heads',
help='Scene')
parser.add_argument('--n_iter', nargs='?', type=int, default=30000,
help='# of iterations (to reproduce the results from ' \
'the paper, 300K for 7S and 12S, 600K for ' \
'Cambridge, 900K for the combined scenes)')
parser.add_argument('--init_lr', nargs='?', type=float, default=5e-5,
help='Initial learning rate')
parser.add_argument('--batch_size', nargs='?', type=int, default=1,
help='Batch size')
parser.add_argument('--aug', nargs='?', type=str2bool, default=True,
help='w/ or w/o data augmentation')
parser.add_argument('--resume', nargs='?', type=str, default=None,
help='Path to saved model to resume from')
parser.add_argument('--data_path', required=True, type=str,
help='Path to dataset')
parser.add_argument('--log-summary', default='progress_log_summary.txt',
metavar='PATH',
help='txt where to save per-epoch stats')
parser.add_argument('--train_id', nargs='?', type=str, default='',
help='An identifier string'),
parser.add_argument('--dense_pred', nargs='?', type=str2bool, default=False,
help='store dense predictions in buffer')
parser.add_argument('--exp_name', nargs='?', type=str, default='exp',
help='store dense predictions in buffer')
parser.add_argument('--buffer_size', nargs='?', type=int, default=1024,
help='the length of buffer size')
parser.add_argument('--sampling', | |
>>> thread = api.determined_kill_experiment(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The experiment id. (required)
:return: V1KillExperimentResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.determined_kill_experiment_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.determined_kill_experiment_with_http_info(id, **kwargs) # noqa: E501
return data
    def determined_kill_experiment_with_http_info(self, id, **kwargs):  # noqa: E501
        """Kill an experiment.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.determined_kill_experiment_with_http_info(id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int id: The experiment id. (required)
        :return: V1KillExperimentResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: auto-generated (swagger-codegen style) client method; prefer
        # regenerating from the API spec over hand-editing this body.
        all_params = ['id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() currently holds self/id/kwargs; fold recognised kwargs in
        # and reject anything the API does not understand.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method determined_kill_experiment" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params or
                params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `determined_kill_experiment`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501
        # Delegate the actual HTTP round trip (and async threading) to the
        # shared ApiClient.
        return self.api_client.call_api(
            '/api/v1/experiments/{id}/kill', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1KillExperimentResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def determined_kill_trial(self, id, **kwargs): # noqa: E501
"""Kill a trial. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.determined_kill_trial(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The trial id (required)
:return: V1KillTrialResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.determined_kill_trial_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.determined_kill_trial_with_http_info(id, **kwargs) # noqa: E501
return data
    def determined_kill_trial_with_http_info(self, id, **kwargs):  # noqa: E501
        """Kill a trial.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.determined_kill_trial_with_http_info(id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int id: The trial id (required)
        :return: V1KillTrialResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: auto-generated (swagger-codegen style) client method; prefer
        # regenerating from the API spec over hand-editing this body.
        all_params = ['id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() currently holds self/id/kwargs; fold recognised kwargs in
        # and reject anything the API does not understand.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method determined_kill_trial" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params or
                params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `determined_kill_trial`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501
        # Delegate the actual HTTP round trip (and async threading) to the
        # shared ApiClient.
        return self.api_client.call_api(
            '/api/v1/trials/{id}/kill', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1KillTrialResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def determined_patch_experiment(self, experiment_id, body, **kwargs): # noqa: E501
"""Patch an experiment's fields. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.determined_patch_experiment(experiment_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int experiment_id: The id of the experiment. (required)
:param V1Experiment body: Patched experiment attributes. (required)
:return: V1PatchExperimentResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.determined_patch_experiment_with_http_info(experiment_id, body, **kwargs) # noqa: E501
else:
(data) = self.determined_patch_experiment_with_http_info(experiment_id, body, **kwargs) # noqa: E501
return data
    def determined_patch_experiment_with_http_info(self, experiment_id, body, **kwargs):  # noqa: E501
        """Patch an experiment's fields.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.determined_patch_experiment_with_http_info(experiment_id, body, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int experiment_id: The id of the experiment. (required)
        :param V1Experiment body: Patched experiment attributes. (required)
        :return: V1PatchExperimentResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: auto-generated (swagger-codegen style) client method; prefer
        # regenerating from the API spec over hand-editing this body.
        all_params = ['experiment_id', 'body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() currently holds self/experiment_id/body/kwargs; fold
        # recognised kwargs in and reject anything unknown.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method determined_patch_experiment" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'experiment_id' is set
        if ('experiment_id' not in params or
                params['experiment_id'] is None):
            raise ValueError("Missing the required parameter `experiment_id` when calling `determined_patch_experiment`")  # noqa: E501
        # verify the required parameter 'body' is set
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `determined_patch_experiment`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'experiment_id' in params:
            # The URL template uses a dotted placeholder name.
            path_params['experiment.id'] = params['experiment_id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501
        # Delegate the actual HTTP round trip (and async threading) to the
        # shared ApiClient.
        return self.api_client.call_api(
            '/api/v1/experiments/{experiment.id}', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1PatchExperimentResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def determined_pause_experiment(self, id, **kwargs): # noqa: E501
"""Pause an experiment. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.determined_pause_experiment(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The experiment id. (required)
:return: V1PauseExperimentResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.determined_pause_experiment_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.determined_pause_experiment_with_http_info(id, **kwargs) # noqa: E501
return data
    def determined_pause_experiment_with_http_info(self, id, **kwargs):  # noqa: E501
        """Pause an experiment.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.determined_pause_experiment_with_http_info(id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int id: The experiment id. (required)
        :return: V1PauseExperimentResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: auto-generated (swagger-codegen style) client method; prefer
        # regenerating from the API spec over hand-editing this body.
        all_params = ['id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() currently holds self/id/kwargs; fold recognised kwargs in
        # and reject anything the API does not understand.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method determined_pause_experiment" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params or
                params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `determined_pause_experiment`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501
        # Delegate the actual HTTP round trip (and async threading) to the
        # shared ApiClient.
        return self.api_client.call_api(
            '/api/v1/experiments/{id}/pause', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1PauseExperimentResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def determined_preview_hp_search(self, body, **kwargs): # noqa: E501
"""Preview hyperparameter search. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Interactive visualizaton for iCLIP-seq and RNA-seq data"""
import runpy
from argparse import ArgumentParser
from pathlib import Path
from xml.dom import minidom
import pickle
import os
import hashlib
import itertools
import pandas
from Bio import SeqIO
from Bio.Alphabet import generic_dna
import converter
import time
import gzip
import bz2
import zipfile
__author__ = "<NAME>"
# Wall-clock start time; used to report total startup duration later.
start = time.time()
# Global application state, filled in during startup below.
geneIndex = pandas.DataFrame()
plotColors = []
geneAnnotations = []
sequences = []
ensembl = False
geneDescriptions = None
descAvail = True
dropList = []
advancedDescriptions = None
subTables = None
dsElements = 0 # number of traces per dataset, i.e Rawdata + bindingsites = 2
bsRawDFs = {}
rawAvail = False # Raw data available
bsProcDFs = {}
procAvail = False # proc data available
spliceSetNames = [[],[]]
spliceElements = 0
fileDict = {} # This dictionary will hold the file indexes for each dataset
spliceAvail = False # splice data available
spliceEventsAvail = False # splice events available
spliceEventsDFs = {}
spliceEventsElements = 0
spliceEventNames = [[],[]]
spliceEventTypes = []
dataSetNames = []
# Colors for dna sequence display
colorA = 'rgb(0, 150, 0)'
colorC = 'rgb(15,15,255)'
colorG = 'rgb(209, 113, 5)'
colorT = 'rgb(255, 9, 9)'
# Map for data track colors
colorMap = {}
# Create dictionary for coverage track colors
coverageColors = ['rgb(255,0,0)', 'rgb(255,165,0)','rgb(255,255,0)','rgb(0,0,255)', 'rgb(128,0,128)']
coverageColorDict = {}
eventColors = ['rgb(0,0,255)', 'rgb(255,0,0)', 'rgb(0,255,0)', 'rgb(128,0,128)', 'rgb(255,165,0)']
# Number of rows read per chunk when processing large data files.
chunkSize = 10000
spliceEventColors = {} # dictionary for splice event colors
# Headers for the data files, files obviously need to conform to these headers for the visualization to work
bedHeader = ['chrom','chromStart','chromEnd','transID','score','strand','thickStart',
             'thickEnd','itemRGB','blockCount','blockSizes','blockStarts']
bsHeader = ['chrom', 'chromStart','chromEnd','type', 'score', 'strand']
rawHeader = ['chrom','chromStart','chromEnd','count']
gtfheader = ['seqname', 'source', 'feature', 'start', 'end', 'score',
             'strand', 'frame', 'attribute']
print('Loading gene annotation files.')
def validateGTF(df):
"""Validates gtf files. Returns True and an empty String if dataframe is valid,
else returns false and an error message.
Positional arguments:
df -- Dataframe to be validated
"""
try:
msg = ''
if df.isnull().values.any() == True:
msg = 'Missing values' + '\n' + str(df.isnull().sum())
return [False, msg]
if (all(x in ['+', '-'] for x in df['strand'].cat.categories.tolist())) != True:
msg = 'Bad strand symbol(has to be + or -'
return [False, msg]
return [True, msg]
except (TypeError, AttributeError, KeyError):
return [False, 'Not a valid dataframe']
def validateBed12(df):
    """Validate a 12-column BED dataframe.

    Returns [True, ''] if the dataframe is valid, else [False, <error message>].

    Positional arguments:
    df -- Dataframe to be validated (expects categorical 'strand' and
          comma-separated 'blockSizes'/'blockStarts' string columns)
    """
    try:
        if df.isnull().values.any():
            return [False, 'Missing values' + '\n' + str(df.isnull().sum())]
        if not all(x in ('+', '-') for x in df['strand'].cat.categories):
            # fixed message: the closing parenthesis was missing
            return [False, 'Bad strand symbol (has to be + or -)']
        # Check every row; the original only inspected the first row.
        # NOTE(review): values are assumed to be comma-terminated ("12,34,"),
        # hence the trailing empty element is dropped before the digit check.
        for column in ('blockSizes', 'blockStarts'):
            for cell in df[column]:
                if not all(v.isdigit() for v in cell.split(',')[:-1]):
                    return [False, 'Column ' + column + ' contains non int values']
        return [True, '']
    except (TypeError, AttributeError, KeyError):
        return [False, 'Not a valid dataframe']
def validateBedGraph(df):
    """Check a 4-column bedgraph dataframe.

    Returns a two-element list: [True, ''] on success, otherwise
    [False, <error message>].

    Positional arguments:
    df -- Dataframe to be validated
    """
    try:
        if df.empty:
            return [False, 'Not a valid dataframe']
        if df.isnull().values.any():
            return [False, 'Missing values' + '\n' + str(df.isnull().sum())]
        return [True, '']
    except (TypeError, AttributeError, KeyError):
        return [False, 'Not a valid dataframe']
def validateBed(df):
    """Check a 6-column BED dataframe.

    Returns a two-element list: [True, ''] on success, otherwise
    [False, <error message>].

    Positional arguments:
    df -- Dataframe to be validated
    """
    try:
        if df.isnull().values.any():
            return [False, 'Missing values' + '\n' + str(df.isnull().sum())]
        strands = df['strand'].cat.categories.tolist()
        if not all(s in ['+', '-'] for s in strands):
            return [False, 'Bad strand symbol(has to be + or -)']
        return [True, '']
    except (TypeError, AttributeError, KeyError):
        return [False, 'Not a valid dataframe']
def isRGB(color):
    """Return True when *color* is a well-formed 'rgb(R, G, B)' string.

    Each channel must be a non-negative integer in [0, 255]; spaces around
    the numbers are tolerated. Anything else yields False.

    Positional arguments:
    color -- a single color string
    """
    try:
        # Slicing (rather than startswith/endswith) keeps the original
        # TypeError behaviour for non-subscriptable inputs.
        if color[0:4] != 'rgb(' or color[-1:] != ')':
            return False
        channels = color[4:-1].split(',')
        if len(channels) != 3:
            return False
        for channel in channels:
            digits = channel.replace(' ', '')
            if not digits.isdigit():
                return False
            if not 0 <= int(digits) <= 255:
                return False
        return True
    except TypeError:
        return False
def md5Gzip(fname):
    """Return an md5 hash object fed with the decompressed content of a gzip file.

    Positional arguments:
    fname -- path (or file object) of the gzip file to hash
    """
    digest = hashlib.md5()
    with gzip.open(fname, 'rb') as handle:
        while True:
            chunk = handle.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest
def md5Bz2(fname):
    """Return an md5 hash object fed with the decompressed content of a bz2 file.

    Positional arguments:
    fname -- path (or file object) of the bzip2 file to hash
    """
    digest = hashlib.md5()
    with bz2.open(fname, 'rb') as handle:
        chunk = handle.read(4096)
        while chunk:
            digest.update(chunk)
            chunk = handle.read(4096)
    return digest
def md5Zip(fname):
    """Return an md5 hash object fed with the decompressed content of a zip archive.

    Hashes the first member of the archive to mirror md5Gzip/md5Bz2, which
    hash decompressed content. An empty archive yields the md5 of no data.

    Positional arguments:
    fname -- path (or file object) of the zip archive to hash
    """
    hash_md5 = hashlib.md5()
    # BUGFIX: the zipfile module has no top-level open(); the original
    # `zipfile.open(fname, 'rb')` raised AttributeError at runtime.
    with zipfile.ZipFile(fname) as archive:
        names = archive.namelist()
        if names:
            with archive.open(names[0]) as f:
                for chunk in iter(lambda: f.read(4096), b""):
                    hash_md5.update(chunk)
    return hash_md5
def loadAnnotations():
for idx, i in enumerate(geneAnnotationPaths):
try:
typeGuess = converter.check_input_file(str(i))
if typeGuess.header_present == True:
header = 1
else:
header = None
print('Loading file ' + str(idx+1) )
if typeGuess.file_type == 'BED12':
# try:
# if i.suffix.lower() =='.bed':
if typeGuess.zipped == True:
if typeGuess.zip_type == 'gzip':
checksum = md5Gzip(str(i))
elif typeGuess.zio_type == 'bzip2':
checksum = md5Bz2(str(i))
elif typeGuess.zio_type == 'zip':
checksum = md5Zip(str(i))
else:
checksum = hashlib.md5(open(str(i)).read().encode('utf-8'))
if checksums.get(str(i.stem), None) != checksum.hexdigest():
dtypes = {'chrom' : 'category', 'chromStart' : 'uint32','chromEnd': 'uint32','transID' : 'object','score' : 'int16','strand' : 'category','thickStart' : 'uint64',
'thickEnd' : 'uint64', 'blockCount' : 'uint32','blockSizes' : 'object','blockStarts' : 'object'}
df = pandas.read_csv(i, sep = '\t', compression='infer', comment = '#', names = bedHeader, dtype = dtypes, header = header)
df = df.join(geneIndex.set_index('transID'), on='transID')
validation = validateBed12(df)
if validation[0] == True:
geneAnnotations.append(df)
out = open(binFilePath + str(i.stem)+'.bin', 'wb')
pickle.dump(df, out)
out.close()
else:
print('Error in file ' + str(i) + ':')
print(validation[1])
checksums[str(i.stem)] = checksum.hexdigest()
else:
try:
df = pickle.load(open(binFilePath + str(i.stem)+'.bin', 'rb'))
geneAnnotations.append(df)
print('Loaded from pickle')
except IOError:
print('pickle not found, loading from raw file')
dtypes = {'chrom' : 'category', 'chromStart' : 'uint32','chromEnd': 'uint32','transID' : 'object','score' : 'int16','strand' : 'category','thickStart' : 'uint64',
'thickEnd' : 'uint64', 'blockCount' : 'uint32','blockSizes' : 'object','blockStarts' : 'object'}
df = pandas.read_csv(i, sep = '\t', compression='infer', comment = '#', names = bedHeader, dtype = dtypes, header = header)
df = df.join(geneIndex.set_index('transID'), on='transID')
validation = validateBed12(df)
if validation[0] == True:
geneAnnotations.append(df)
out = open(binFilePath + str(i.stem)+'.bin', 'wb')
pickle.dump(df, out)
out.close()
else:
print('Error in file ' + str(i) + ':')
print(validation[1])
except UnicodeDecodeError:
print('Error decoding pickle binary file, will load from raw file instead')
dtypes = {'chrom' : 'category', 'chromStart' : 'uint32','chromEnd': 'uint32','transID' : 'object','score' : 'int16','strand' : 'category','thickStart' : 'uint64',
'thickEnd' : 'uint64', 'blockCount' : 'uint32','blockSizes' : 'object','blockStarts' : 'object'}
df = pandas.read_csv(i, sep = '\t', compression='infer', comment = '#', names = bedHeader, dtype = dtypes, header = header)
df = df.join(geneIndex.set_index('transID'), on='transID')
validation = validateBed12(df)
if validation[0] == True:
geneAnnotations.append(df)
out = open(binFilePath + str(i.stem)+'.bin', 'wb')
pickle.dump(df, out)
out.close()
else:
print('Error in file ' + str(i) + ':')
print(validation[1])
except ModuleNotFoundError:
print('Pickle was created using different package versions, will load from raw file instead')
dtypes = {'chrom' : 'category', 'chromStart' : 'uint32','chromEnd': 'uint32','transID' : 'object','score' : 'int16','strand' : 'category','thickStart' : 'uint64',
'thickEnd' : 'uint64', 'blockCount' : 'uint32','blockSizes' : 'object','blockStarts' : 'object'}
df = pandas.read_csv(i, sep = '\t', compression='infer', comment = '#', names = bedHeader, dtype = dtypes, header = header)
df = df.join(geneIndex.set_index('transID'), on='transID')
validation = validateBed12(df)
if validation[0] == True:
geneAnnotations.append(df)
out = open(binFilePath + str(i.stem)+'.bin', 'wb')
pickle.dump(df, out)
out.close()
else:
print('Error in file ' + str(i) + ':')
print(validation[1])
elif typeGuess.file_type == 'GTF':
# if i.suffix.lower() == '.gtf':
if typeGuess.zipped == True:
if typeGuess.zip_type == 'gzip':
checksum = md5Gzip(str(i))
elif typeGuess.zio_type == 'bzip2':
checksum = md5Bz2(str(i))
elif typeGuess.zio_type == 'zip':
checksum = md5Zip(str(i))
else:
checksum = hashlib.md5(open(str(i)).read().encode('utf-8'))
if checksums.get(str(i.stem), None) != checksum.hexdigest():
dtypes = {'seqname' : 'object', 'source' : 'object', 'feature' : 'object', 'start' : 'uint32', 'end': 'uint32', 'score' : 'object',
'strand' : 'category', 'frame' : 'object', 'attribute' : 'object'}
df = pandas.read_csv(i, sep = '\t', compression='infer', comment = '#', names = gtfheader, dtype = dtypes, header | |
#!/usr/bin/env python
"""
:mod:`disco.worker.classic.worker` -- Classic Disco Runtime Environment
=======================================================================
When a Job is constructed using the classic :class:`Worker` defined in this module,
Disco runs the :mod:`disco.worker.classic.worker` module for every job task.
This module reconstructs the :class:`Worker` on the node where it is run,
in order to execute the :term:`job functions` which were used to create it.
Classic Workers resolve all parameters using :meth:`~disco.worker.Worker.getitem`.
Thus, users can subclass :class:`Job` as a convenient way to specify fixed parameters.
For example, here's a simple distributed grep from the Disco ``examples/`` directory:
.. literalinclude:: ../../../examples/util/grep.py
"""
import sys
# In Python3, __pycache__ directories and .pyc files are created if
# needed on module imports in the job directory. When multiple tasks
# of the same job start executing in the same job directory, these
# tasks race in their creation. The resulting race conditions result
# in random errors in module imports, which cause task and job
# failures. It appears that Python3 (at least upto Python 3.2) does
# not correctly handle concurrent creation of __pycache__ and .pyc by
# independent processes. So we turn off the writing of .pyc files for
# Python3.
if sys.version_info[0] == 3:
sys.dont_write_bytecode = 1
import os
from disco import util, worker
from disco.worker.classic import external
from disco.worker.classic.func import * # XXX: hack so func fns dont need to import
from disco import JOBPACK_VERSION1
from disco.worker import Params
class Worker(worker.Worker):
"""
A :class:`disco.worker.Worker`, which additionally supports the following parameters,
to maintain the **Classic Disco Interface**:
:type map: :func:`disco.worker.classic.func.map`
:param map: a function that defines the map task.
:type map_init: :func:`disco.worker.classic.func.init`
:param map_init: initialization function for the map task.
This function is called once before the task starts.
.. deprecated:: 0.4
*map_init* has not been needed ever since
:class:`disco.worker.task_io.InputStream`s
were introduced.
Use *map_input_stream* and/or *map_reader* instead.
:type map_input_stream: sequence of :func:`disco.worker.task_io.input_stream`
:param map_input_stream: The given functions are chained together and the final resulting
:class:`disco.worker.task_io.InputStream` object is used
to iterate over input entries.
.. versionadded:: 0.2.4
:type map_output_stream: sequence of :func:`disco.worker.task_io.output_stream`
:param map_output_stream: The given functions are chained together and the
:meth:`disco.worker.task_io.OutputStream.add` method of the last
returned :class:`disco.worker.task_io.OutputStream` object is used
to serialize key, value pairs output by the map.
.. versionadded:: 0.2.4
:type map_reader: ``None`` or :func:`disco.worker.task_io.input_stream`
:param map_reader: Convenience function to define the last :func:`disco.worker.task_io.input_stream`
function in the *map_input_stream* chain.
If you want to use outputs of an earlier job as inputs,
use :func:`disco.worker.task_io.chain_reader` as the *map_reader*.
.. versionchanged:: 0.3.1
The default is ``None``.
:type combiner: :func:`disco.worker.classic.func.combiner`
:param combiner: called after the partitioning function, for each partition.
:type reduce: :func:`disco.worker.classic.func.reduce`
:param reduce: If no reduce function is specified, the job will quit after
the map phase has finished.
.. versionadded:: 0.3.1
Reduce now supports an alternative signature,
:func:`disco.worker.classic.func.reduce2`,
which uses an iterator instead
of ``out.add()`` to output results.
.. versionchanged:: 0.2
It is possible to define only *reduce* without *map*.
See also :ref:`reduceonly`.
:type reduce_init: :func:`disco.worker.classic.func.init`
:param reduce_init: initialization function for the reduce task.
This function is called once before the task starts.
.. deprecated:: 0.4
*reduce_init* has not been needed ever since
:class:`disco.worker.task_io.InputStream`s
were introduced.
Use *reduce_input_stream* and/or *reduce_reader* instead.
:type reduce_input_stream: sequence of :func:`disco.worker.task_io.output_stream`
:param reduce_input_stream: The given functions are chained together and the last
returned :class:`disco.worker.task_io.InputStream` object is
given to *reduce* as its first argument.
.. versionadded:: 0.2.4
:type reduce_output_stream: sequence of :func:`disco.worker.task_io.output_stream`
:param reduce_output_stream: The given functions are chained together and the last
returned :class:`disco.worker.task_io.OutputStream` object is
given to *reduce* as its second argument.
.. versionadded:: 0.2.4
:type reduce_reader: :func:`disco.worker.task_io.input_stream`
:param reduce_reader: Convenience function to define the last :func:`disco.worker.task_io.input_stream`
if *map* is specified.
If *map* is not specified,
you can read arbitrary inputs with this function,
similar to *map_reader*.
Default is :func:`disco.worker.task_io.chain_reader`.
.. versionadded:: 0.2
:type partition: :func:`disco.worker.classic.func.partition`
:param partition: decides how the map output is distributed to reduce.
Default is :func:`disco.worker.classic.func.default_partition`.
:type partitions: int or None
:param partitions: number of partitions, if any.
Default is ``1``.
:type sort: boolean
:param sort: flag specifying whether the intermediate results,
that is, input to the reduce function, should be sorted.
Sorting is most useful in ensuring that the equal keys are
consequent in the input for the reduce function.
Other than ensuring that equal keys are grouped together,
sorting ensures that keys are returned in the ascending order.
No other assumptions should be made on the comparison function.
The external program ``sort`` is used to sort the input on disk.
In-memory sort can easily be performed by the tasks themselves.
Default is ``False``.
:type sort_buffer_size: string
:param sort_buffer_size: how much memory can be used by external sort.
Passed as the '-S' option to Unix `sort` (see *man sort*).
Default is ``10%`` i.e. 10% of the total available memory.
:type params: object
:param params: object that is passed to worker tasks to store state
The object is serialized using the *pickle* module,
so it should be pickleable.
A convenience class :class:`Params` is provided that
provides an easy way to encapsulate a set of parameters.
:class:`Params` allows including functions in the parameters.
:param ext_params: if either map or reduce function is an external program,
typically specified using :func:`disco.util.external`,
this object is used to deliver parameters to the program.
See :mod:`disco.worker.classic.external`.
:type status_interval: int
:param status_interval: print "K items mapped / reduced"
for every Nth item.
Setting the value to 0 disables messages.
Increase this value, or set it to zero,
if you get "Message rate limit exceeded"
error due to system messages.
This might happen if your tasks are really fast.
Decrease the value if you want more messages or
you don't have that many data items.
Default is ``100000``.
"""
jobpack_version = JOBPACK_VERSION1
def defaults(self):
defaults = super(Worker, self).defaults()
defaults.update({'map': None,
'map_init': init,
'map_reader': None,
'map_input_stream': (map_input_stream, ),
'map_output_stream': (map_output_stream,
disco_output_stream),
'map_shuffle': None,
'combiner': None,
'partition': default_partition,
'partitions': 1,
'reduce': None,
'reduce_init': init,
'reduce_reader': chain_reader,
'reduce_input_stream': (reduce_input_stream, ),
'reduce_output_stream': (reduce_output_stream,
disco_output_stream),
'reduce_shuffle': None,
'ext_params': {},
'params': Params(),
'shuffle': None,
'sort': False,
'sort_buffer_size': '10%',
'status_interval': 100000,
'version': '.'.join(str(s) for s in sys.version_info[:2])})
return defaults
    def jobdict(self, job, **jobargs):
        """
        Creates :ref:`jobdict` for the :class:`Worker`.
        Makes use of the following parameters, in addition to those
        defined by the :class:`Worker` itself:
        :type input: list of urls or list of list of urls
        :param input: used to set :attr:`jobdict.input`.
                      Disco natively handles the following url schemes:
                      * ``http://...`` - any HTTP address
                      * ``file://...`` or no scheme - a local file.
                        The file must exist on all nodes where the tasks are run.
                        Due to these restrictions, this form has only limited use.
                      * ``tag://...`` - a tag stored in :ref:`DDFS`
                      * ``raw://...`` - pseudo-address: use the address itself as data.
                      * ``dir://...`` - used by Disco internally.
                      * ``disco://...`` - used by Disco internally.
                      .. seealso:: :mod:`disco.schemes`.
        :type scheduler: dict
        :param scheduler: directly sets :attr:`jobdict.scheduler`.
        Uses :meth:`getitem` to resolve the values of parameters.
        :return: the :term:`job dict`.
        """
        # Imported here (not at module level) to avoid import cycles with
        # the disco package at worker startup.
        from disco.util import isiterable, inputlist, ispartitioned, read_index
        from disco.error import DiscoError
        # Resolve parameters lazily through Worker.getitem so job/jobargs
        # overrides take precedence over the defaults.
        def get(key, default=None):
            return self.getitem(key, job, jobargs, default)
        has_map = bool(get('map'))
        has_reduce = bool(get('reduce'))
        reduce_shuffle = bool(get('reduce_shuffle'))
        job_input = get('input', [])
        has_save_results = get('save', False) or get('save_results', False)
        if not isiterable(job_input):
            raise DiscoError("Job 'input' is not a list of input locations,"
                             "or a list of such lists: {0}".format(job_input))
        input = inputlist(job_input,
                          label=None if has_map else False,
                          settings=job.settings)
        # -- nr_reduces --
        # ignored if there is not actually a reduce specified
        # XXX: master should always handle this
        if has_map:
            # partitioned map has N reduces; non-partitioned map has 1 reduce
            nr_reduces = get('partitions') or 1
        elif ispartitioned(input):
            # no map, with partitions: len(dir://) specifies nr_reduces
            nr_reduces = 1 + max(int(id)
                                 for dir in input
                                 for id, url, size in read_index(dir))
        else:
            # no map, without partitions can only have 1 reduce
            nr_reduces = 1
        # Merge the classic-worker keys into the base jobdict expected by
        # the Disco master.
        jobdict = super(Worker, self).jobdict(job, **jobargs)
        jobdict.update({'input': input,
                        'worker': self.bin,
                        'map?': has_map,
                        'reduce?': has_reduce,
                        'reduce_shuffle?': reduce_shuffle,
                        'nr_reduces': nr_reduces,
                        'save_results': has_save_results})
        return jobdict
def jobzip(self, job, **jobargs):
jobzip = super(Worker, self).jobzip(job, **jobargs)
def get(key):
return | |
<reponame>cyrildarevo/genPassportScore<filename>working version coz cached/a3_GDPtable.py
# Uses the first GDP entry that matches the country.
# Ivory Coast, Sao Tome and Principe, and Curacao may have been de-accented.
GDPtable = [{'Country': 'United States', 'GDP': 19390600.0},
{'Country': 'China', 'GDP': 12014610.0},
{'Country': 'Japan', 'GDP': 4872135.0},
{'Country': 'Germany', 'GDP': 3684816.0},
{'Country': 'United Kingdom', 'GDP': 2624529.0},
{'Country': 'India', 'GDP': 2611012.0},
{'Country': 'France', 'GDP': 2583560.0},
{'Country': 'Brazil', 'GDP': 2054969.0},
{'Country': 'Italy', 'GDP': 1937894.0},
{'Country': 'Canada', 'GDP': 1652412.0},
{'Country': 'South Korea', 'GDP': 1538030.0},
{'Country': 'Russia', 'GDP': 1527469.0},
{'Country': 'Australia', 'GDP': 1379548.0},
{'Country': 'Spain', 'GDP': 1313951.0},
{'Country': 'Mexico', 'GDP': 1149236.0},
{'Country': 'Indonesia', 'GDP': 1015411.0},
{'Country': 'Turkey', 'GDP': 849480.0},
{'Country': 'Netherlands', 'GDP': 825745.0},
{'Country': 'Saudi Arabia', 'GDP': 683827.0},
{'Country': 'Switzerland', 'GDP': 678575.0},
{'Country': 'Argentina', 'GDP': 637717.0},
{'Country': 'Taiwan', 'GDP': 579302.0},
{'Country': 'Sweden', 'GDP': 538575.0},
{'Country': 'Poland', 'GDP': 524886.0},
{'Country': 'Belgium', 'GDP': 494733.0},
{'Country': 'Thailand', 'GDP': 455378.0},
{'Country': 'Iran', 'GDP': 431920.0},
{'Country': 'Austria', 'GDP': 416845.0},
{'Country': 'Norway', 'GDP': 396457.0},
{'Country': 'United Arab Emirates', 'GDP': 377435.0},
{'Country': 'Nigeria', 'GDP': 376284.0},
{'Country': 'Israel', 'GDP': 350609.0},
{'Country': 'South Africa', 'GDP': 349299.0},
{'Country': 'Hong Kong', 'GDP': 341659.0},
{'Country': 'Ireland', 'GDP': 333994.0},
{'Country': 'Denmark', 'GDP': 324484.0},
{'Country': 'Singapore', 'GDP': 323902.0},
{'Country': 'Malaysia', 'GDP': 314497.0},
{'Country': 'Philippines', 'GDP': 313419.0},
{'Country': 'Colombia', 'GDP': 309197.0},
{'Country': 'Pakistan', 'GDP': 303993.0},
{'Country': 'Chile', 'GDP': 277042.0},
{'Country': 'Bangladesh', 'GDP': 261374.0},
{'Country': 'Finland', 'GDP': 253244.0},
{'Country': 'Egypt', 'GDP': 237073.0},
{'Country': 'Vietnam', 'GDP': 220408.0},
{'Country': 'Portugal', 'GDP': 218064.0},
{'Country': 'Peru', 'GDP': 215224.0},
{'Country': 'Czech Republic', 'GDP': 213189.0},
{'Country': 'Romania', 'GDP': 211315.0},
{'Country': 'Venezuela', 'GDP': 210085.0},
{'Country': 'New Zealand', 'GDP': 201485.0},
{'Country': 'Greece', 'GDP': 200690.0},
{'Country': 'Iraq', 'GDP': 197699.0},
{'Country': 'Algeria', 'GDP': 178287.0},
{'Country': 'Qatar', 'GDP': 166326.0},
{'Country': 'Kazakhstan', 'GDP': 160839.0},
{'Country': 'Hungary', 'GDP': 152284.0},
{'Country': 'Angola', 'GDP': 124209.0},
{'Country': 'Kuwait', 'GDP': 120351.0},
{'Country': 'Morocco', 'GDP': 109824.0},
{'Country': 'Ukraine', 'GDP': 109321.0},
{'Country': 'Ecuador', 'GDP': 102311.0},
{'Country': 'Puerto Rico', 'GDP': 98805.0},
{'Country': 'Slovakia', 'GDP': 95938.0},
{'Country': 'Sri Lanka', 'GDP': 87591.0},
{'Country': 'Ethiopia', 'GDP': 80874.0},
{'Country': 'Kenya', 'GDP': 79511.0},
{'Country': 'Syria', 'GDP': 77460.0},
{'Country': 'Guatemala', 'GDP': 75661.0},
{'Country': 'Dominican Republic', 'GDP': 75018.0},
{'Country': 'Oman', 'GDP': 74274.0},
{'Country': 'Myanmar', 'GDP': 66537.0},
{'Country': 'Luxembourg', 'GDP': 62393.0},
{'Country': 'Uruguay', 'GDP': 58415.0},
{'Country': 'Panama', 'GDP': 61838.0},
{'Country': 'Costa Rica', 'GDP': 58056.0},
{'Country': 'Sudan', 'GDP': 58239.0},
{'Country': 'Bulgaria', 'GDP': 56943.0},
{'Country': 'Croatia', 'GDP': 54516.0},
{'Country': 'Belarus', 'GDP': 54436.0},
{'Country': 'Tanzania', 'GDP': 51725.0},
{'Country': 'Lebanon', 'GDP': 51457.0},
{'Country': 'Macau', 'GDP': 49802.0},
{'Country': 'Slovenia', 'GDP': 48868.0},
{'Country': 'Uzbekistan', 'GDP': 47883.0},
{'Country': 'Lithuania', 'GDP': 47263.0},
{'Country': 'Ghana', 'GDP': 47032.0},
{'Country': 'Serbia', 'GDP': 41471.0},
{'Country': 'Democratic Republic of the Congo', 'GDP': 41441.0},
{'Country': 'Azerbaijan', 'GDP': 40670.0},
{'Country': 'Jordan', 'GDP': 40487.0},
{'Country': 'Ivory Coast', 'GDP': 40360.0},
{'Country': 'Tunisia', 'GDP': 40275.0},
{'Country': 'Turkmenistan', 'GDP': 37926.0},
{'Country': 'Bolivia', 'GDP': 37122.0},
{'Country': 'Bahrain', 'GDP': 34895.0},
{'Country': 'Cameroon', 'GDP': 34006.0},
{'Country': 'Libya', 'GDP': 31331.0},
{'Country': 'Latvia', 'GDP': 30319.0},
{'Country': 'Paraguay', 'GDP': 29619.0},
{'Country': 'El Salvador', 'GDP': 28023.0},
{'Country': 'Uganda', 'GDP': 26349.0},
{'Country': 'Estonia', 'GDP': 25973.0},
{'Country': 'Zambia', 'GDP': 25504.0},
{'Country': 'Nepal', 'GDP': 24472.0},
{'Country': 'Iceland', 'GDP': 23909.0},
{'Country': 'Papua New Guinea', 'GDP': 23617.0},
{'Country': 'Honduras', 'GDP': 22975.0},
{'Country': 'Cambodia', 'GDP': 22252.0},
{'Country': 'Cyprus', 'GDP': 21310.0},
{'Country': 'Afghanistan', 'GDP': 20889.0},
{'Country': 'Trinidad and Tobago', 'GDP': 20300.0},
{'Country': 'Bosnia and Herzegovina', 'GDP': 17457.0},
{'Country': 'Laos', 'GDP': 17152.0},
{'Country': 'Zimbabwe', 'GDP': 17105.0},
{'Country': 'Botswana', 'GDP': 16725.0},
{'Country': 'Yemen', 'GDP': 16511.0},
{'Country': 'Senegal', 'GDP': 16057.0},
{'Country': 'Georgia', 'GDP': 15230.0},
{'Country': 'Mali', 'GDP': 14998.0},
{'Country': 'Gabon', 'GDP': 14467.0},
{'Country': 'Jamaica', 'GDP': 14290.0},
{'Country': 'Nicaragua', 'GDP': 13692.0},
{'Country': 'Burkina Faso', 'GDP': 13187.0},
{'Country': 'Albania', 'GDP': 13001.0},
{'Country': 'Namibia', 'GDP': 12558.0},
{'Country': 'Mozambique', 'GDP': 12345.0},
{'Country': 'Mauritius', 'GDP': 12273.0},
{'Country': 'Malta', 'GDP': 12011.0},
{'Country': 'Brunei', 'GDP': 11963.0},
{'Country': 'Macedonia', 'GDP': 11416.0},
{'Country': 'Armenia', 'GDP': 11037.0},
{'Country': 'Mongolia', 'GDP': 10869.0},
{'Country': 'Madagascar', 'GDP': 10557.0},
{'Country': 'Equatorial Guinea', 'GDP': 10069.0},
{'Country': 'Chad', 'GDP': 9740.0},
{'Country': 'Benin', 'GDP': 9410.0},
{'Country': 'Guinea', 'GDP': 9183.0},
{'Country': 'Bahamas', 'GDP': 9127.0},
{'Country': 'Rwanda', 'GDP': 8918.0},
{'Country': 'Kosovo', 'GDP': 8883.0},
{'Country': 'Haiti', 'GDP': 8360.0},
{'Country': 'Moldova', 'GDP': 7945.0},
{'Country': 'Niger', 'GDP': 7892.0},
{'Country': 'Republic of Congo', 'GDP': 7799.0},
{'Country': 'Tajikistan', 'GDP': 7234.0},
{'Country': 'Kyrgyzstan', 'GDP': 7061.0},
{'Country': 'Malawi', 'GDP': 6261.0},
{'Country': 'Eritrea', 'GDP': 6050.0},
{'Country': 'Fiji', 'GDP': 5054.0},
{'Country': 'Mauritania', 'GDP': 4985.0},
{'Country': 'Barbados', 'GDP': 4821.0},
{'Country': 'Togo', 'GDP': 4797.0},
{'Country': 'Maldives', 'GDP': 4520.0},
{'Country': 'Montenegro', 'GDP': 4405.0},
{'Country': 'Swaziland', 'GDP': 4030.0},
{'Country': 'Sierra Leone', 'GDP': 3897.0},
{'Country': 'Suriname', 'GDP': 3665.0},
{'Country': 'Guyana', 'GDP': 3591.0},
{'Country': 'Burundi', 'GDP': 3393.0},
{'Country': 'South Sudan', 'GDP': 2870.0},
{'Country': 'Lesotho', 'GDP': 2721.0},
{'Country': 'Timor-Leste', 'GDP': 2716.0},
{'Country': 'Bhutan', 'GDP': 2321.0},
{'Country': 'Liberia', 'GDP': 2140.0},
{'Country': 'Djibouti', 'GDP': 2082.0},
{'Country': 'Central African Republic', 'GDP': 1992.0},
{'Country': 'Belize', 'GDP': 1819.0},
{'Country': 'Cape Verde', 'GDP': 1728.0},
{'Country': 'St. Lucia', 'GDP': 1717.0},
{'Country': 'San Marino', 'GDP': 1592.0},
{'Country': 'Antigua and Barbuda', 'GDP': 1535.0},
{'Country': 'Seychelles', 'GDP': 1479.0},
{'Country': 'Guinea-Bissau', 'GDP': 1295.0},
{'Country': 'Solomon Islands', 'GDP': 1273.0},
{'Country': 'Grenada', 'GDP': 1111.0},
{'Country': 'The Gambia', 'GDP': 1038.0},
{'Country': 'St. Kitts and Nevis', 'GDP': 939.0},
{'Country': 'Samoa', 'GDP': 844.0},
{'Country': 'Vanuatu', 'GDP': 837.0},
{'Country': 'St. Vincent and the Grenadines', 'GDP': 815.0},
{'Country': 'Comoros', 'GDP': 659.0},
{'Country': 'Dominica', 'GDP': 608.0},
{'Country': 'Tonga', 'GDP': 437.0},
{'Country': 'Sao Tome and Principe', 'GDP': 372.0},
{'Country': 'Federated States of Micronesia', 'GDP': 329.0},
{'Country': 'Palau', 'GDP': 321.0},
{'Country': 'Marshall Islands', 'GDP': 199.0},
{'Country': 'Kiribati', 'GDP': 186.0},
{'Country': 'Tuvalu', 'GDP': 40.0},
{'Country': 'United States', 'GDP': 19390604.0},
{'Country': 'European Union', 'GDP': 17277698.0},
{'Country': 'China', 'GDP': 12237700.0},
{'Country': 'Japan', 'GDP': 4872137.0},
{'Country': 'Germany', 'GDP': 3677439.0},
{'Country': 'United Kingdom', 'GDP': 2622434.0},
{'Country': 'India', 'GDP': 2597491.0},
{'Country': 'France', 'GDP': 2582501.0},
{'Country': 'Brazil', 'GDP': 2055506.0},
{'Country': 'Italy', 'GDP': 1934798.0},
{'Country': 'Canada', 'GDP': 1653043.0},
{'Country': 'Russia', 'GDP': 1577524.0},
{'Country': 'South Korea', 'GDP': 1530751.0},
{'Country': 'Australia', 'GDP': 1323421.0},
{'Country': 'Spain', 'GDP': 1311320.0},
{'Country': 'Mexico', 'GDP': 1149919.0},
{'Country': 'Indonesia', 'GDP': 1015539.0},
{'Country': 'Turkey', 'GDP': 851102.0},
{'Country': 'Netherlands', 'GDP': 826200.0},
{'Country': 'Saudi Arabia', 'GDP': 683827.0},
{'Country': 'Switzerland', 'GDP': 678887.0},
{'Country': 'Argentina', 'GDP': 637590.0},
{'Country': 'Sweden', 'GDP': 538040.0},
{'Country': 'Poland', 'GDP': 524510.0},
{'Country': 'Belgium', 'GDP': 492681.0},
{'Country': 'Thailand', 'GDP': 455221.0},
{'Country': 'Iran', 'GDP': 439514.0},
{'Country': 'Austria', 'GDP': 416596.0},
{'Country': 'Norway', 'GDP': 398832.0},
{'Country': 'United Arab Emirates', 'GDP': 382575.0},
{'Country': 'Nigeria', 'GDP': 375771.0},
{'Country': 'Israel', 'GDP': 350851.0},
{'Country': 'South Africa', 'GDP': 349419.0},
{'Country': 'Hong Kong', 'GDP': 341449.0},
{'Country': 'Ireland', 'GDP': 333731.0},
{'Country': 'Denmark', 'GDP': 324872.0},
{'Country': 'Singapore', 'GDP': 323907.0},
{'Country': 'Malaysia', 'GDP': 314500.0},
{'Country': 'Philippines', 'GDP': 313595.0},
{'Country': 'Colombia', 'GDP': 309191.0},
{'Country': 'Pakistan', 'GDP': 304952.0},
{'Country': 'Chile', 'GDP': 277076.0},
{'Country': 'Finland', 'GDP': 251885.0},
{'Country': 'Bangladesh', 'GDP': 249724.0},
{'Country': 'Egypt', 'GDP': 235369.0},
{'Country': 'Vietnam', 'GDP': 223864.0},
{'Country': 'Portugal', 'GDP': 217571.0},
{'Country': 'Czech Republic', 'GDP': 215726.0},
{'Country': 'Romania', 'GDP': 211803.0},
{'Country': 'Peru', 'GDP': 211389.0},
{'Country': 'New Zealand', 'GDP': 205853.0},
{'Country': 'Greece', 'GDP': 200288.0},
{'Country': 'Iraq', 'GDP': 197716.0},
{'Country': 'Algeria', 'GDP': 170371.0},
{'Country': 'Qatar', 'GDP': 167605.0},
{'Country': 'Kazakhstan', 'GDP': 159407.0},
{'Country': 'Hungary', 'GDP': 139135.0},
{'Country': 'Angola', 'GDP': 124209.0},
{'Country': 'Kuwait', 'GDP': 120126.0},
{'Country': 'Sudan', 'GDP': 117488.0},
{'Country': 'Ukraine', 'GDP': 112154.0},
{'Country': 'Morocco', 'GDP': 109139.0},
{'Country': 'Ecuador', 'GDP': 103057.0},
{'Country': 'Slovak Republic', 'GDP': 95769.0},
{'Country': 'Sri Lanka', 'GDP': 87175.0},
{'Country': 'Ethiopia', 'GDP': 80561.0},
{'Country': 'Dominican Republic', 'GDP': 75932.0},
{'Country': 'Guatemala', 'GDP': 75620.0},
{'Country': 'Kenya', 'GDP': 74938.0},
{'Country': 'Oman', 'GDP': 72643.0},
{'Country': 'Myanmar', 'GDP': 69322.0},
{'Country': 'Luxembourg', 'GDP': 62404.0},
{'Country': 'Panama', 'GDP': 61838.0},
{'Country': 'Costa Rica', 'GDP': 57057.0},
{'Country': 'Bulgaria', 'GDP': 56832.0},
{'Country': 'Uruguay', 'GDP': 56157.0},
{'Country': 'Croatia', 'GDP': 54849.0},
{'Country': 'Belarus', 'GDP': 54442.0},
{'Country': 'Tanzania', 'GDP': 52090.0},
{'Country': 'Lebanon', 'GDP': 51844.0},
{'Country': 'Libya', 'GDP': 50984.0},
{'Country': 'Macau', 'GDP': 50361.0},
{'Country': 'Slovenia', 'GDP': 48770.0},
{'Country': 'Uzbekistan', 'GDP': 48718.0},
{'Country': 'Ghana', 'GDP': 47330.0},
{'Country': 'Lithuania', 'GDP': 47168.0},
{'Country': 'Turkmenistan', 'GDP': 42355.0},
{'Country': 'Serbia', 'GDP': 41432.0},
{'Country': 'Azerbaijan', 'GDP': 40748.0},
{'Country': 'Ivory Coast', 'GDP': 40389.0},
{'Country': 'Tunisia', 'GDP': 40257.0},
{'Country': 'Jordan', 'GDP': 40068.0},
{'Country': 'Bolivia', 'GDP': 37509.0},
{'Country': 'Democratic Republic of the Congo', 'GDP': 37241.0},
{'Country': 'Bahrain', 'GDP': 35307.0},
{'Country': 'Cameroon', 'GDP': 34799.0},
{'Country': 'Latvia', 'GDP': 30264.0},
{'Country': 'Paraguay', 'GDP': 29735.0},
{'Country': 'Estonia', 'GDP': 25921.0},
{'Country': 'Uganda', 'GDP': 25891.0},
{'Country': 'Zambia', 'GDP': 25809.0},
{'Country': 'El Salvador', 'GDP': 24805.0},
{'Country': 'Nepal', 'GDP': 24472.0},
{'Country': 'Iceland', | |
a Renewable Energy Source?</p>
<ul style="list-style-type: none">
<li>
<input type="radio" name="q2" id="q2c1" value="wrong">
<label for="q2c1">Yes, it is Renewable.</label>
</li>
<li>
<input type="radio" name="q2" id="q2c2" value="right">
<label for="q2c2">No, it is a Non-Renewable Energy Source. </label>
</li>
</ul>
<button id="q2Btn">Submit</button>
<p id="q2AnswerStatus"></p>
</div>
<!-- Question 3 -->
<div>
<p>Is Natural Gas a Renewable Energy Source?</p>
<ul style="list-style-type: none">
<li>
<input type="radio" name="q3" id="q3c1" value="wrong">
<label for="q3c1">Yes, it is Renewable.</label>
</li>
<li>
<input type="radio" name="q3" id="q3c2" value="right">
<label for="q3c2">No, it is a Non-Renewable Energy Source. </label>
</li>
</ul>
<button id="q3Btn">Submit</button>
<p id="q3AnswerStatus"></p>
</div>
<!-- Question 4 -->
<div>
<p>Is Coal Energy a Renewable Energy Source?</p>
<ul style="list-style-type: none">
<li>
<input type="radio" name="q4" id="q4c1" value="wrong">
<label for="q4c1">Yes, it is Renewable.</label>
</li>
<li>
<input type="radio" name="q4" id="q4c2" value="right">
<label for="q4c2">No, it is a Non-Renewable Energy Source. </label>
</li>
</ul>
<button id="q4Btn">Submit</button>
<p id="q4AnswerStatus"></p>
</div>
<!-- Question 5 -->
<div>
<p>Is Biomass Energy a Renewable Energy Source?</p>
<ul style="list-style-type: none">
<li>
<input type="radio" name="q5" id="q5c1" value="right">
<label for="q5c1">Yes, it is Renewable.</label>
</li>
<li>
<input type="radio" name="q5" id="q5c2" value="wrong">
<label for="q5c2">No, it is a Non-Renewable Energy Source. </label>
</li>
</ul>
<button id="q5Btn">Submit</button>
<p id="q5AnswerStatus"></p>
</div>
<!-- Question 6 -->
<div>
<p>Is Wind Energy a Renewable Energy Source?</p>
<ul style="list-style-type: none">
<li>
<input type="radio" name="q6" id="q6c1" value="right">
<label for="q6c1">Yes, it is Renewable.</label>
</li>
<li>
<input type="radio" name="q6" id="q6c2" value="wrong">
<label for="q6c2">No, it is a Non-Renewable Energy Source. </label>
</li>
</ul>
<button id="q6Btn">Submit</button>
<p id="q6AnswerStatus"></p>
</div>
<!-- Question 7 -->
<div>
<p>Is Water Energy a Renewable Energy Source?</p>
<ul style="list-style-type: none">
<li>
<input type="radio" name="q7" id="q7c1" value="right">
<label for="q7c1">Yes, it is Renewable.</label>
</li>
<li>
<input type="radio" name="q7" id="q7c2" value="wrong">
<label for="q7c2">No, it is a Non-Renewable Energy Source. </label>
</li>
</ul>
<button id="q7Btn">Submit</button>
<p id="q7AnswerStatus"></p>
</div>
<!-- Question 8 -->
<div>
<p>Is Nuclear Energy a Renewable Energy Source?</p>
<ul style="list-style-type: none">
<li>
<input type="radio" name="q8" id="q8c1" value="wrong">
<label for="q8c1">Yes, it is Renewable.</label>
</li>
<li>
<input type="radio" name="q8" id="q8c2" value="right">
<label for="q8c2">No, it is a Non-Renewable Energy Source. </label>
</li>
</ul>
<button id="q8Btn">Submit</button>
<p id="q8AnswerStatus"></p>
</div>
<!-- Question 9 -->
<div>
<p>Is Geothermal Energy a Renewable Energy Source?</p>
<ul style="list-style-type: none">
<li>
<input type="radio" name="q9" id="q9c1" value="right">
<label for="q9c1">Yes, it is Renewable.</label>
</li>
<li>
<input type="radio" name="q9" id="q9c2" value="wrong">
<label for="q9c2">No, it is a Non-Renewable Energy Source. </label>
</li>
</ul>
<button id="q9Btn">Submit</button>
<p id="q9AnswerStatus"></p>
</div>
<script>
// Maps each question number to the id of the radio button that holds the
// correct answer.  Question N's submit button has id "qNBtn" and its feedback
// paragraph has id "qNAnswerStatus", so one generic handler serves every
// question instead of nine copy-pasted ones.
var correctChoiceIds = {
  1: "q1c1",
  2: "q2c2",
  3: "q3c2",
  4: "q4c2",
  5: "q5c1",
  6: "q6c1",
  7: "q7c1",
  8: "q8c2",
  9: "q9c1"
};
Object.keys(correctChoiceIds).forEach(function (q) {
  document.getElementById("q" + q + "Btn").onclick = function () {
    var feedback = document.getElementById("q" + q + "AnswerStatus");
    // An unselected radio and a wrong radio both count as a wrong answer,
    // matching the behavior of the original per-question handlers.
    if (document.getElementById(correctChoiceIds[q]).checked) {
      feedback.innerHTML = "Correct Answer!";
    } else {
      feedback.innerHTML = "Wrong Answer :(";
    }
  };
});
</script>
## The Good and Bad Traits of Energy Sources
Now that we understand each of the energy sources, it is important to weigh the good and bad traits of each one. An energy technique is efficient when it achieves maximum productivity with minimum wasted effort or expense. Note that the bad traits of an energy source are usually negative side effects that we try to lessen or prevent while gathering usable energy.
<img src="https://thesolarscoop.com/wp-content/uploads/2018/03/Solar.jpg" style="margin: 0 auto; width: 1000px;">
#### Source Image: EcoFasten, March 2018. Retrieved from https://thesolarscoop.com/wp-content/uploads/2018/03/Solar
%%html
<head>
<style>
table, th, td {
border: 1px solid black;
border-collapse: collapse;
}
th, td {
padding: 5px;
text-align: left;
}
</style>
</head>
<body>
<h1>Solar</h1>
<p></p>
<table style="width:100%" table align="left">
<tr>
<th style="text-align:center">Good Traits</th>
<th style="text-align:center">Bad Traits</th>
</tr>
<tr>
<td style="text-align:left">Solar energy has recently experienced decreasing costs and high public support. </td>
<td style="text-align:left">Solar energy is intermittent, i.e. electricity production is dependent on sunlight.</td>
</tr>
<tr>
<td style="text-align:left">Low CO2 emissions.</td>
<td style="text-align:left">Expensive but in recent years the cost of solar energy equipment has decreased.</td>
</tr>
<tr>
<td style="text-align:left">Easy to install, little operation and maintenance work.</td>
<td style="text-align:left">Forecasts are more unpredictable in comparison to fossil fuels (but better than wind).</td>
</tr>
</table>
<h3></h3>
<p></p>
</body>
</html>
from IPython.display import Image
Image(url= "https://ak5.picdn.net/shutterstock/videos/17748445/thumb/5.jpg", width=1000, height=300)
#### Source Image: Shutterstock, n.d. Retrieved from https://www.shutterstock.com/video/clip-17748445-close-up-industrial-oil-pump-jack-working
%%html
<head>
<style>
table, th, td {
border: 1px solid black;
border-collapse: collapse;
}
th, td {
padding: 5px;
text-align: left;
}
</style>
</head>
<body>
<h1>Oil</h1>
<p></p>
<table style="width:100%">
<tr>
<th style="text-align:center">Good Traits</th>
<th style="text-align:center">Bad Traits</th>
</tr>
<tr>
<td style="text-align:left">Oil is cheap to produce and refine. </td>
<td style="text-align:left">Burning oil for electricity is a major source of air pollution on Earth and leads to health concerns and environmental damage. </td>
</tr>
<tr>
<td style="text-align:left">Unlike the renewable energy sources such as solar and wind energy that are weather dependent sources of power, Oil represents a reliable, ready-to-use source of energy.</td>
<td style="text-align:left">Burning oil for energy releases harmful gases into the atmosphere such as carbon dioxide (CO2), carbon monoxide (CO), nitrogen oxides (NOx), and sulfur dioxide (SO2, causes acid rain). </td>
</tr>
<tr>
<td></td>
<td style="text-align:left">Despite the fact that oil energy can get jobs done in a less expensive way, it is not a renewable source of energy. There will come a time when we run out of supply.</td>
</tr>
</table>
<h3></h3>
<p></p>
</body>
</html>
from IPython.display import Image
Image(filename="images/gasmap.jpg", width=1000, height=300)
#### Source Image: Studentenergy, n.d. Retrieved from https://www.studentenergy.org/topics/natural-gas
%%html
<head>
<style>
table, th, td {
border: 1px solid black;
border-collapse: collapse;
}
th, td {
padding: 5px;
text-align: left;
}
</style>
</head>
<body>
<h1>Natural Gas</h1>
<p></p>
<table style="width:100%">
<tr>
<th style="text-align:center">Good Traits</th>
<th style="text-align:center">Bad Traits</th>
</tr>
<tr>
<td style="text-align:left">Emits the least CO2 compared to the other forms of non-renewable fossil fuels.</td>
<td style="text-align:left">Gas drilling has a negative impact on the environment.</td>
</tr>
<tr>
<td style="text-align:left"> Natural gas hot water heaters typically heat water twice as fast as electric heaters.</td>
<td style="text-align:left">Some regions that sell natural gas face political instability. This usually occurs when a country is dependent on natural gas as their only source of income. </td>
</tr>
<tr>
<td></td>
<td style="text-align:left">Natural gas is the more expensive energy source in comparison to other fossil fuels.</td>
</tr>
</table>
<h3></h3>
<p></p>
</body>
</html>
from IPython.display import Image
Image(url= "https://images.theconversation.com/files/125332/original/image-20160606-26003-1hjtcr5.jpg?ixlib=rb-1.1.0&q=45&auto=format&w=496&fit=clip", width=1000, height=100)
#### Source Image: The Conversation, June 2016. Retrieved from http://theconversation.com/is-coal-the-only-way-to-deal-with-energy-poverty-in-developing-economies-54163
%%html
<head>
<style>
table, th, td {
border: 1px solid black;
border-collapse: collapse;
}
th, td {
padding: 5px;
text-align: left;
}
</style>
</head>
<body>
<h1>Coal</h1>
<p></p>
<table style="width:100%">
<tr>
<th style="text-align:center">Good Traits</th>
<th style="text-align:center">Bad Traits</th>
</tr>
<tr>
| |
# Partition the list in-place, and return the position of the pivot element.
# # debugcode
# print('partition', li, start, end)
if end is None:
end = len(li) - 1
left, right = start, end - 1
while left < right:
# # debugcode
# print(' ', li, left, right)
# We use the last elem as the pivot.
if li[left] <= li[end]:
# Increment left pointer if the elem <= pivot
left += 1
elif li[right] > li[end]:
# Decrement right pointer if the elem > pivot
right -= 1
else:
# Then li[left] > pivot AND li[right] <= pivot, so we swap them
li[left], li[right] = li[right], li[left]
# # debugcode
# print(' ', li, left, right)
# At this point, we must have left=right, ie the two pointers have overlapped
if li[left] > li[end]:
li[left], li[end] = li[end], li[left]
return left
else:
return end
def quicksort_using_quickselect(nums: Sequence) -> List:
    """Sort `nums` non-descendingly by repeated quickselect calls.

    Returns a brand-new list; the caller's `nums` is left untouched.

    Note
    ----
    This runs a full selection pass per output position, so it is far too
    slow for large inputs; it exists purely to demonstrate one application
    of :py:meth:`sorting.divide_and_conquer.quickselect`.
    """
    scratch = nums.copy()
    ordered = []
    for position in range(len(scratch)):
        # quickselect's return value is irrelevant here; we rely only on its
        # side effect of moving the position'th smallest element into
        # scratch[position].
        quickselect(scratch, 0, None, position)
        ordered.append(scratch[position])
    return ordered
def quickselect(
    li: Sequence, left: int = 0, right: Optional[int] = None, n: int = 0
) -> int:
    """Return the index of such an element in the original `li`, as is the `n`'th element of this list when sorted non-descendingly (`n` starts from 0).

    Uses the median-of-medians pivot, which keeps the worst case linear.

    Warning
    -------
    This function partially sorts the input `li`, so do back it up beforehand.
    """
    if right is None:
        right = len(li) - 1
    while True:
        if left >= right:
            # The search window has collapsed onto a single element.
            return left
        pivot_index = _get_pivot_index(li, left, right)
        pivot_index = _trisection(li, left, right, pivot_index, n)
        if n == pivot_index:
            return n
        elif n < pivot_index:
            # Target is among the elements smaller than the pivot.
            right = pivot_index - 1
        else:
            # Target is among the elements greater than the pivot.
            # BUGFIX: `n` is an index into the *whole* list and must stay
            # absolute.  The previous version also did `n -= pivot_index`
            # here, which made `n` relative while all later comparisons use
            # absolute indices, so wrong indices were returned (e.g. for
            # li=[2, 1, 4, 3, 6, 5], n=5 it returned 3 instead of 5).
            left = pivot_index + 1


def _get_pivot_index(li: Sequence, left: int, right: int) -> int:
    # Return the index of an approximate median of li[left:right+1], found by
    # the median-of-medians scheme: split into groups of <= 5 elements, take
    # each group's median, then recursively select the median of those
    # medians.  Note: this calls quickselect(), so the two functions are
    # mutually recursive.
    if right - left < 5:
        # Small enough to take the exact median directly.
        return _median_of_less_than_five(li, left, right)
    for i in range(left, right, 5):
        # Each group is li[i:subright+1].
        subright = min(i + 4, right)
        median5 = _median_of_less_than_five(li, i, subright)
        # Pack the group medians into the leftmost slots li[left], li[left+1], ...
        li[median5], li[left + (i - left) // 5] = li[left + (i - left) // 5], li[median5]
    # All group medians now sit in the leftmost slots; select their median.
    mid = int((right - left) / 10) + left + 1
    return quickselect(li, left, left + int((right - left) / 5), mid)


def _median_of_less_than_five(li: Sequence, left: int, right: int) -> int:
    # Insertion-sort li[left:right+1] (a group of at most five elements) in
    # place, ascending, then return the index of its median.
    i = left + 1
    while i <= right:
        j = i
        while j > left and li[j - 1] > li[j]:
            li[j - 1], li[j] = li[j], li[j - 1]
            j -= 1
        i += 1
    return int((left + right) / 2)


def _trisection(li: Sequence, left: int, right: int, pivot_index: int, n: int) -> int:
    # Three-way partition of li[left:right+1] around li[pivot_index]:
    # [ smaller | equal | greater ].  Return an index that tells the caller
    # which part holds the n'th smallest element:
    #   - n in the "smaller" part -> first index of the "equal" run,
    #   - n in the "equal" part   -> n itself (that slot already holds it),
    #   - n in the "greater" part -> last index of the "equal" run.
    # Grouping the equal elements keeps median-of-medians linear even when
    # many elements coincide.
    pivot_value = li[pivot_index]
    # Park the pivot at the right end so the scans below cannot overwrite it.
    li[pivot_index], li[right] = li[right], li[pivot_index]
    index_smaller = left
    for i in range(left, right):
        if li[i] < pivot_value:
            # Move every element smaller than the pivot into the left part.
            li[index_smaller], li[i] = li[i], li[index_smaller]
            index_smaller += 1
    index_equal = index_smaller
    for i in range(index_smaller, right):
        if li[i] == pivot_value:
            # Move every element equal to the pivot right after the smaller part.
            li[index_equal], li[i] = li[i], li[index_equal]
            index_equal += 1
    # Finally place the pivot itself at the end of the "equal" run.
    li[index_equal], li[right] = li[right], li[index_equal]
    if n < index_smaller:
        return index_smaller
    if n <= index_equal:
        return n
    return index_equal
# Notebook class and custom comparisons
class Notebook:
    """A notebook record: its title, the owning username, and a like count."""

    def __init__(self, title, username, likes) -> None:
        self.title = title
        self.username = username
        self.likes = likes

    def __repr__(self) -> str:
        """Readable form, e.g. ``Notebook <"alice/Intro", 3 likes>`` plus newline."""
        return 'Notebook <"{0}/{1}", {2} likes>\n'.format(
            self.username, self.title, self.likes
        )
def bubble_sort_with_compare(nums: Sequence, compare: Callable[..., str]) -> List:
    """Bubble sort that accepts a custom comparison function.

    Parameters
    ----------
    nums : the values to sort; the original is left intact.
    compare : called as ``compare(a, b)``; must return one of the strings
        ``'lesser'``, ``'equal'`` or ``'greater'``.

    Returns
    -------
    A new list sorted non-descendingly according to `compare`.
    """
    nums = nums.copy()
    for _ in range(len(nums) - 1):
        swapped = False
        for i in range(len(nums) - 1):
            if compare(nums[i], nums[i+1]) == 'greater':
                # Out of order: swap the adjacent pair.
                nums[i], nums[i+1] = nums[i+1], nums[i]
                swapped = True
        if not swapped:
            # A full pass with no swap means the list is already sorted;
            # further passes cannot change anything, so stop early.
            break
    return nums
def insertion_sort_with_compare(nums: Sequence, compare: Callable[..., str]) -> List:
    """Insertion sort that accepts a custom comparison function.

    Parameters
    ----------
    nums : the values to sort; the original is left intact.
    compare : called as ``compare(a, b)``; must return one of the strings
        ``'lesser'``, ``'equal'`` or ``'greater'``.

    Returns
    -------
    A new list sorted non-descendingly according to `compare`.
    """
    nums = nums.copy()
    # nums[0:1] is trivially sorted, so start inserting from index 1.
    for i in range(1, len(nums)):
        # Remove the current element, then scan the sorted prefix
        # right-to-left for its insertion point.
        current = nums.pop(i)
        j = i - 1
        while j >= 0 and compare(current, nums[j]) == 'lesser':
            j -= 1
        # nums[j] is now the rightmost prefix element that `current` is not
        # lesser than (or j == -1), so insert right after it.  Inserting
        # *after* equal elements keeps the sort stable.
        nums.insert(j + 1, current)
    return nums
def merge_sort_with_compare(li: Sequence, compare: Callable[..., str]) -> List:
    """Merge sort that accepts a custom comparison function as the sorting criterion.

    Parameters
    ----------
    li : the values to sort; the original is left intact.
    compare : called as ``compare(a, b)``; must return one of the strings
        ``'lesser'``, ``'equal'`` or ``'greater'``.

    Returns
    -------
    A new list sorted non-descendingly according to `compare`.
    """
    if len(li) < 2:
        # BUGFIX: copy even the trivial case.  The old code returned `li`
        # itself here, aliasing the caller's object and contradicting the
        # documented "return a new list" contract.
        return list(li)
    mid = len(li) // 2
    return _merge_with_compare(
        merge_sort_with_compare(li[:mid], compare=compare),
        merge_sort_with_compare(li[mid:], compare=compare),
        compare
    )


def _merge_with_compare(left: Sequence, right: Sequence, compare: Callable[..., str]):
    # Merge two already-sorted lists into one sorted list, preferring
    # elements from `left` on ties so the overall sort stays stable.
    i, j, merged = 0, 0, []
    while i < len(left) and j < len(right):
        if compare(left[i], right[j]) in ('lesser', 'equal'):
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # At most one of these two tails is non-empty.
    return merged + left[i:] + right[j:]
def compare_likes_desc(nb1: Notebook, nb2: Notebook) -> str:
    """Compare two notebooks by likes, descending: the more likes a notebook has, the 'lesser' it ranks (so it sorts first)."""
    if nb1.likes == nb2.likes:
        return 'equal'
    return 'lesser' if nb1.likes > nb2.likes else 'greater'
def compare_titles(nb1: Notebook, nb2: Notebook) -> str:
    """Compare two notebooks alphabetically by title."""
    if nb1.title == nb2.title:
        return 'equal'
    return 'lesser' if nb1.title < nb2.title else 'greater'
##########################################
### Test client
# Bubble sort, insertion sort
jovian.evaluate_test_cases(func=bubble_sort, test_cases=tests[:-1])
# jovian.evaluate_test_case(func=bubble_sort, test_case=tests[-1])
jovian.evaluate_test_cases(func=insertion_sort, test_cases=tests[:-1])
# jovian.evaluate_test_case(func=insertion_sort, test_case=tests[-1])
# Merge sort
print(merge_sort([5, -12, 2, 6, 1, 23, 7, 7, -12], has_display=True))
jovian.evaluate_test_cases(func=_merge_sort_for_testing, test_cases=tests)
# Next line shows that merge sort is much | |
self.apply_mod = NodeApplyModule(in_features, out_features, activation, bias=bias)
    def _sum(self, nodes: dgl.NodeBatch):
        """Reduce function: SUM (not average — the original docstring was
        copy-pasted from _mean) the incoming neighbor messages and overwrite
        the node feature 'h' with the result."""
        # nodes.mailbox['m'] stacks the messages per node; dim 1 is the
        # message axis — presumably (num_nodes, num_msgs, feat), confirm
        # against the DGL mailbox documentation.
        m = nodes.mailbox['m']
        accum = torch.sum(m, 1)
        return {'h': accum}
def _mean(self, nodes: dgl.NodeBatch):
"""Take an average over all neighbor node features hu and use it to
overwrite the original node feature."""
m = nodes.mailbox['m']
mean = torch.mean(m, 1)
return {'h': mean}
def forward(self, text, graphs):
hidden = text
batched_graph = dgl.batch(graphs)
feature = hidden.view([-1, hidden.size()[-1]])
if feature.size()[0] != batched_graph.number_of_nodes():
print('error')
batched_graph.ndata['h'] = feature
gcn_msg = dgl_fn.copy_src(src='h', out='m')
gcn_reduce = self._mean
batched_graph.update_all(gcn_msg, gcn_reduce)
batched_graph.apply_nodes(func=self.apply_mod)
ug = dgl.unbatch(batched_graph)
output = [torch.unsqueeze(g.ndata.pop('h'), 0).to(self.opt['device']) for g in ug]
output = torch.cat(output, 0)
return output
class DglGraphConvolutionForAspectCategory(nn.Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907

    Averages each node's neighbor features, then passes the result through
    `NodeApplyModule` (linear transform + activation).
    """
    def __init__(self, in_features, out_features, opt, bias=True, activation=F.relu):
        super().__init__()
        self.opt = opt
        self.in_features = in_features
        self.out_features = out_features
        # Per-node transform applied after message aggregation.
        self.apply_mod = NodeApplyModule(in_features, out_features, activation, bias=bias)

    def _sum(self, nodes: dgl.NodeBatch):
        """Reduce function: sum the incoming neighbor messages into 'h'."""
        return {'h': torch.sum(nodes.mailbox['m'], 1)}

    def _mean(self, nodes: dgl.NodeBatch):
        """Reduce function: average the incoming neighbor messages into 'h'."""
        return {'h': torch.mean(nodes.mailbox['m'], 1)}

    def forward(self, text, graphs):
        """One round of neighbor averaging followed by the node transform.

        Returns the per-graph node features stacked along dim 0."""
        batched = dgl.batch(graphs)
        node_feats = text.view([-1, text.size()[-1]])
        if node_feats.size()[0] != batched.number_of_nodes():
            # Feature rows must line up 1:1 with the batched graph's nodes.
            print('error')
        batched.ndata['h'] = node_feats
        batched.update_all(dgl_fn.copy_src(src='h', out='m'), self._mean)
        batched.apply_nodes(func=self.apply_mod)
        per_graph = [torch.unsqueeze(g.ndata.pop('h'), 0).to(self.opt['device'])
                     for g in dgl.unbatch(batched)]
        return torch.cat(per_graph, 0)
class DglGraphAverage(nn.Module):
    """
    Parameter-free graph layer: replaces every node feature with the mean of
    its neighbors' features (cf. https://arxiv.org/abs/1609.02907).
    """
    def __init__(self, in_features, out_features, opt, bias=True, activation=F.relu):
        super().__init__()
        self.opt = opt
        self.in_features = in_features
        self.out_features = out_features

    def _sum(self, nodes: dgl.NodeBatch):
        """Reduce function: sum the incoming neighbor messages into 'h'."""
        return {'h': torch.sum(nodes.mailbox['m'], 1)}

    def _mean(self, nodes: dgl.NodeBatch):
        """Reduce function: average the incoming neighbor messages into 'h'."""
        return {'h': torch.mean(nodes.mailbox['m'], 1)}

    def forward(self, text, graphs):
        """One round of neighbor averaging (no learned transform).

        Returns the per-graph node features stacked along dim 0."""
        batched = dgl.batch(graphs)
        node_feats = text.view([-1, text.size()[-1]])
        if node_feats.size()[0] != batched.number_of_nodes():
            # Feature rows must line up 1:1 with the batched graph's nodes.
            print('error')
        batched.ndata['h'] = node_feats
        batched.update_all(dgl_fn.copy_src(src='h', out='m'), self._mean)
        per_graph = [torch.unsqueeze(g.ndata.pop('h'), 0).to(self.opt['device'])
                     for g in dgl.unbatch(batched)]
        return torch.cat(per_graph, 0)
class DglGraphAverageForAspectCategory(nn.Module):
    """
    Parameter-free graph layer (aspect-category variant): replaces every node
    feature with the mean of its neighbors' features
    (cf. https://arxiv.org/abs/1609.02907).
    """
    def __init__(self, in_features, out_features, opt, bias=True, activation=F.relu):
        super().__init__()
        self.opt = opt
        self.in_features = in_features
        self.out_features = out_features

    def _sum(self, nodes: dgl.NodeBatch):
        """Reduce function: sum the incoming neighbor messages into 'h'."""
        return {'h': torch.sum(nodes.mailbox['m'], 1)}

    def _mean(self, nodes: dgl.NodeBatch):
        """Reduce function: average the incoming neighbor messages into 'h'."""
        return {'h': torch.mean(nodes.mailbox['m'], 1)}

    def forward(self, text, graphs):
        """One round of neighbor averaging (no learned transform).

        Returns the per-graph node features stacked along dim 0."""
        batched = dgl.batch(graphs)
        node_feats = text.view([-1, text.size()[-1]])
        if node_feats.size()[0] != batched.number_of_nodes():
            # Feature rows must line up 1:1 with the batched graph's nodes.
            print('error')
        batched.ndata['h'] = node_feats
        batched.update_all(dgl_fn.copy_src(src='h', out='m'), self._mean)
        per_graph = [torch.unsqueeze(g.ndata.pop('h'), 0).to(self.opt['device'])
                     for g in dgl.unbatch(batched)]
        return torch.cat(per_graph, 0)
class DglGraphAttentionForAspectCategory(nn.Module):
    """
    Aggregate child-node (neighbor) features with attention.

    For every aspect-specific attention module, compute weights over each
    node's incoming messages; the per-aspect weights are then averaged
    (`torch.mean` over the aspect axis in `_attention`) to obtain the final
    child-node weights used for the weighted sum.
    """
    def __init__(self, in_features, out_features, aspect_attentions, opt, bias=True, activation=F.relu):
        super().__init__()
        self.opt = opt
        # One attention module per aspect category.
        self.aspect_attentions: List[AttentionInHtt] = aspect_attentions
        self.in_features = in_features
        self.out_features = out_features
    def element_wise_mul(self, input1, input2, return_not_sum_result=False):
        # Weighted sum: scale each row of input1 by the matching scalar weight
        # in input2, then sum over the row axis.
        # Assumes input1 rows align with input2 weights — TODO confirm shapes.
        feature_list = []
        for feature_1, feature_2 in zip(input1, input2):
            feature_2 = feature_2.unsqueeze(1)
            feature_2 = feature_2.expand_as(feature_1)
            feature = feature_1 * feature_2
            feature = feature.unsqueeze(0)
            feature_list.append(feature)
        output = torch.cat(feature_list, 0)
        result = torch.sum(output, 1)
        if return_not_sum_result:
            # Also expose the per-element products, before summation.
            return result, output
        else:
            return result
    def _message_func(self, edges: dgl.EdgeBatch):
        # Each edge carries both the aspect ('h') and the sentiment
        # ('h_sentiment') features of its source node.
        result = {'m': edges.src['h'], 'm_sentiment': edges.src['h_sentiment']}
        return result
    def _sum(self, nodes: dgl.NodeBatch):
        """Reduce function: sum the incoming messages into 'h' (alternative
        reducer; `forward` uses `_attention`)."""
        m = nodes.mailbox['m']
        accum = torch.sum(m, 1)
        return {'h': accum}
    def _mean(self, nodes: dgl.NodeBatch):
        """Reduce function: average the incoming messages into 'h'
        (alternative reducer; `forward` uses `_attention`)."""
        m = nodes.mailbox['m']
        mean = torch.mean(m, 1)
        return {'h': mean}
    def _attention(self, nodes: dgl.NodeBatch):
        """Reduce function: attention-weighted sum of incoming messages,
        using the mean over all aspect attentions as the weights."""
        m = nodes.mailbox['m']
        alphas = []
        for i in range(len(self.aspect_attentions)):
            aspect_attention = self.aspect_attentions[i]
            alpha = aspect_attention(m, None)
            alphas.append(alpha.unsqueeze(1))
        alpha_cat = torch.cat(alphas, dim=1)
        # Average the attention weights over the aspect axis (dim 1).
        alpha_final = torch.mean(alpha_cat, dim=1)
        h = self.element_wise_mul(m, alpha_final, return_not_sum_result=False)
        m_sentiment = nodes.mailbox['m_sentiment']
        # The same averaged weights are reused for the sentiment channel.
        h_sentiment = self.element_wise_mul(m_sentiment, alpha_final, return_not_sum_result=False)
        return {'h': h, 'h_sentiment': h_sentiment}
    def forward(self, aspect_representation, graphs, sentiment_representation=None):
        if sentiment_representation is None:
            sentiment_representation_flag = False
            # Reuse the aspect features so the rest of the code can run
            # unconditionally on both channels.
            sentiment_representation = aspect_representation
        else:
            sentiment_representation_flag = True
        hidden = aspect_representation
        batched_graph = dgl.batch(graphs)
        feature = hidden.view([-1, hidden.size()[-1]])
        feature_sentiment = sentiment_representation.view([-1, sentiment_representation.size()[-1]])
        if feature.size()[0] != batched_graph.number_of_nodes():
            # Feature rows must line up 1:1 with the batched graph's nodes.
            print('error')
        batched_graph.ndata['h'] = feature
        batched_graph.ndata['h_sentiment'] = feature_sentiment
        # gcn_msg = dgl_fn.copy_src(src='h', out='m')
        gcn_msg = self._message_func
        gcn_reduce = self._attention
        batched_graph.update_all(gcn_msg, gcn_reduce)
        ug = dgl.unbatch(batched_graph)
        output = [torch.unsqueeze(g.ndata.pop('h'), 0).to(self.opt['device']) for g in ug]
        output = torch.cat(output, 0)
        output_sentiment = [torch.unsqueeze(g.ndata.pop('h_sentiment'), 0).to(self.opt['device']) for g in ug]
        output_sentiment = torch.cat(output_sentiment, 0)
        if sentiment_representation_flag:
            return output, output_sentiment
        else:
            return output
class DglGraphAttentionForAspectCategoryWithDottedLines(nn.Module):
    """
    Aggregate child-node (neighbor) features with attention, respecting
    "dotted line" edge masks.

    For every aspect-specific attention module, compute weights over each
    node's incoming messages, zero out weights on masked (dotted-line) edges,
    then average the per-aspect weights (`torch.mean` over the aspect axis in
    `_attention`) to obtain the final child-node weights.
    """
    def __init__(self, in_features, out_features, aspect_attentions, opt, bias=True, activation=F.relu):
        super().__init__()
        self.opt = opt
        # One attention module per aspect category.
        self.aspect_attentions: List[AttentionInHtt] = aspect_attentions
        self.in_features = in_features
        self.out_features = out_features
    def element_wise_mul(self, input1, input2, return_not_sum_result=False):
        # Weighted sum: scale each row of input1 by the matching scalar weight
        # in input2, then sum over the row axis.
        # Assumes input1 rows align with input2 weights — TODO confirm shapes.
        feature_list = []
        for feature_1, feature_2 in zip(input1, input2):
            feature_2 = feature_2.unsqueeze(1)
            feature_2 = feature_2.expand_as(feature_1)
            feature = feature_1 * feature_2
            feature = feature.unsqueeze(0)
            feature_list.append(feature)
        output = torch.cat(feature_list, 0)
        result = torch.sum(output, 1)
        if return_not_sum_result:
            # Also expose the per-element products, before summation.
            return result, output
        else:
            return result
    def _message_func(self, edges: dgl.EdgeBatch):
        # Each edge carries the source node's aspect ('h') and sentiment
        # ('h_sentiment') features plus the edge's dotted-line mask.
        result = {'m': edges.src['h'], 'dotted_line_masks': edges.data['dotted_line_masks'],
                  'm_sentiment': edges.src['h_sentiment']}
        return result
    def _sum(self, nodes: dgl.NodeBatch):
        """Reduce function: sum the incoming messages into 'h' (alternative
        reducer; `forward` uses `_attention`)."""
        m = nodes.mailbox['m']
        accum = torch.sum(m, 1)
        return {'h': accum}
    def _mean(self, nodes: dgl.NodeBatch):
        """Reduce function: average the incoming messages into 'h'
        (alternative reducer; `forward` uses `_attention`)."""
        m = nodes.mailbox['m']
        mean = torch.mean(m, 1)
        return {'h': mean}
    def _attention(self, nodes: dgl.NodeBatch):
        """Reduce function: attention-weighted sum of incoming messages.

        Weights on masked (dotted-line) edges are zeroed; note the weights
        are NOT re-normalised after masking."""
        m = nodes.mailbox['m']
        dotted_line_masks = nodes.mailbox['dotted_line_masks'].to(self.opt['device'])
        # for i in range(dotted_line_masks.size()[0]):
        #     print(dotted_line_masks[i])
        alphas = []
        for i in range(len(self.aspect_attentions)):
            aspect_attention = self.aspect_attentions[i]
            alpha_temp = aspect_attention(m, None)
            # Zero out attention on dotted-line edges.
            alpha = alpha_temp * dotted_line_masks.float()
            alphas.append(alpha.unsqueeze(1))
        alpha_cat = torch.cat(alphas, dim=1)
        # Average the (masked) attention weights over the aspect axis (dim 1).
        alpha_final = torch.mean(alpha_cat, dim=1)
        h = self.element_wise_mul(m, alpha_final, return_not_sum_result=False)
        m_sentiment = nodes.mailbox['m_sentiment']
        # The same averaged weights are reused for the sentiment channel.
        h_sentiment = self.element_wise_mul(m_sentiment, alpha_final, return_not_sum_result=False)
        return {'h': h, 'h_sentiment': h_sentiment}
    def forward(self, aspect_representation, graphs, sentiment_representation=None):
        if sentiment_representation is None:
            sentiment_representation_flag = False
            # Reuse the aspect features so the rest of the code can run
            # unconditionally on both channels.
            sentiment_representation = aspect_representation
        else:
            sentiment_representation_flag = True
        hidden = aspect_representation
        batched_graph = dgl.batch(graphs)
        feature = hidden.view([-1, hidden.size()[-1]])
        feature_sentiment = sentiment_representation.view([-1, sentiment_representation.size()[-1]])
        if feature.size()[0] != batched_graph.number_of_nodes():
            # Feature rows must line up 1:1 with the batched graph's nodes.
            print('error')
        batched_graph.ndata['h'] = feature
        batched_graph.ndata['h_sentiment'] = feature_sentiment
        # gcn_msg = dgl_fn.copy_src(src='h', out='m')
        gcn_msg = self._message_func
        gcn_reduce = self._attention
        batched_graph.update_all(gcn_msg, gcn_reduce)
        ug = dgl.unbatch(batched_graph)
        output = [torch.unsqueeze(g.ndata.pop('h'), 0).to(self.opt['device']) for g in ug]
        output = torch.cat(output, 0)
        output_sentiment = [torch.unsqueeze(g.ndata.pop('h_sentiment'), 0).to(self.opt['device']) for g in ug]
        output_sentiment = torch.cat(output_sentiment, 0)
        if sentiment_representation_flag:
            return output, output_sentiment
        else:
            return output
class SentenceConsituencyAwareModelV8(TextInAllAspectSentimentOutModel):
"""
"""
def __init__(self, word_embedder: TextFieldEmbedder, position_embedder: TextFieldEmbedder,
aspect_embedder: TextFieldEmbedder, categories: list, polarities: list, vocab: Vocabulary,
configuration: dict, category_loss_weight=1, sentiment_loss_weight=1):
super().__init__(vocab, category_loss_weight=category_loss_weight, sentiment_loss_weight=sentiment_loss_weight)
self.configuration = configuration
self.word_embedder = word_embedder
self.position_embedder = position_embedder
self.aspect_embedder = aspect_embedder
self.categories = categories
self.polarites = polarities
self.category_num = len(categories)
self.polarity_num = len(polarities)
self.category_loss = nn.BCEWithLogitsLoss()
self.sentiment_loss = nn.CrossEntropyLoss()
self._accuracy = metrics.CategoricalAccuracy()
self._f1 = allennlp_metrics.BinaryF1(0.5)
word_embedding_dim = word_embedder.get_output_dim()
aspect_encoder_input_size = word_embedding_dim
if self.configuration['aspect_position']:
aspect_encoder_input_size += position_embedder.get_output_dim()
if self.configuration['lstm_or_fc_after_embedding_layer'] == 'fc':
self.embedding_layer_fc = nn.Linear(aspect_encoder_input_size, word_embedding_dim, bias=True)
elif self.configuration['lstm_or_fc_after_embedding_layer'] == 'bilstm':
self.embedding_layer_lstm = torch.nn.LSTM(aspect_encoder_input_size, int(word_embedding_dim / 2), batch_first=True,
bidirectional=True, num_layers=1)
else:
self.embedding_layer_lstm = torch.nn.LSTM(aspect_encoder_input_size, word_embedding_dim, batch_first=True,
bidirectional=False, num_layers=1)
self.embedding_layer_aspect_attentions = [AttentionInHtt(word_embedding_dim,
word_embedding_dim)
for _ in range(self.category_num)]
| |
<reponame>pinjutien/shap
from ..utils import partition_tree_shuffle, MaskedModel
from .._explanation import Explanation
from ._explainer import Explainer
import numpy as np
import pandas as pd
import scipy as sp
from .. import links
class Permutation(Explainer):
    """ This method approximates the Shapley values by iterating through permutations of the inputs.

    This is a model agnostic explainer that guarantees local accuracy (additivity) by iterating completely
    through an entire permutation of the features in both forward and reverse directions. If we do this
    once, then we get the exact SHAP values for models with up to second order interaction effects. We can
    iterate this many times over many random permutations to get better SHAP value estimates for models
    with higher order interactions. This sequential ordering formulation also allows for easy reuse of
    model evaluations and the ability to efficiently avoid evaluating the model when the background values
    for a feature are the same as the current input value. We can also account for hierarchical data
    structures with partition trees, something not currently implemented for KernelExplainer or SamplingExplainer.
    """

    def __init__(self, model, masker, link=links.identity):
        """ Build an explainers.Permutation object for the given model using the given masker object.

        Parameters
        ----------
        model : function
            A callable python object that executes the model given a set of input data samples.

        masker : function or numpy.array or pandas.DataFrame
            A callable python object used to "mask" out hidden features of the form `masker(x, mask)`.
            It takes a single input sample and a binary mask and returns a matrix of masked samples. These
            masked samples are evaluated using the model function and the outputs are then averaged.
            As a shortcut for the standard masking used by SHAP you can pass a background data matrix
            instead of a function and that matrix will be used for masking. To use a clustering
            game structure you can pass a shap.maskers.Tabular(data, clustering=\"correlation\") object.
        """
        super(Permutation, self).__init__(model, masker, link=link)

    def explain_row(self, *row_args, max_evals, main_effects, error_bounds, silent):
        """ Explains a single row and returns the tuple (row_values, row_expected_values, row_mask_shapes).
        """
        # build a masked version of the model for the current input sample
        fm = MaskedModel(self.model, self.masker, self.link, *row_args)

        # by default we run 10 permutations forward and backward
        if max_evals == "auto":
            max_evals = 10 * 2 * len(fm)

        # loop over many permutations
        inds = fm.varying_inputs()
        # np.bool / np.int are deprecated aliases (removed in NumPy 1.24);
        # the builtin types are the correct dtype arguments.
        inds_mask = np.zeros(len(fm), dtype=bool)
        inds_mask[inds] = True
        masks = np.zeros(2*len(inds)+1, dtype=int)
        masks[0] = MaskedModel.delta_mask_noop_value
        npermutations = max_evals // (2*len(inds)+1)
        row_values = np.zeros(len(fm))
        for _ in range(npermutations):

            # shuffle the indexes so we get a random permutation ordering
            if getattr(self.masker, "partition_tree", None) is not None:
                # [TODO] This is shuffle does not work when inds is not a complete set of integers from 0 to M
                #assert len(inds) == len(fm), "Need to support partition shuffle when not all the inds vary!!"
                partition_tree_shuffle(inds, inds_mask, self.masker.partition_tree)
            else:
                np.random.shuffle(inds)

            # create a large batch of delta masks to evaluate: toggle every
            # varying feature on (forward pass), then off again in the same
            # order (reverse pass)
            i = 1
            for ind in inds:
                masks[i] = ind
                i += 1
            for ind in inds:
                masks[i] = ind
                i += 1

            # evaluate the masked model
            outputs = fm(masks)

            # update our SHAP value estimates from the forward pass...
            for i, ind in enumerate(inds):
                row_values[ind] += outputs[i+1] - outputs[i]
            # ...and from the reverse pass. BUGFIX: the original repeated the
            # forward-pass differences here, double-counting the forward
            # deltas and never using the second half of `outputs`. In the
            # reverse pass feature inds[i] is toggled OFF at step
            # len(inds)+i+1, so its contribution is f(present) - f(absent).
            for i, ind in enumerate(inds):
                row_values[ind] += outputs[len(inds)+i] - outputs[len(inds)+i+1]

        # NOTE: if max_evals < 2*len(inds)+1 then npermutations == 0 and
        # `outputs` is undefined below (same as the original behavior).
        expected_value = outputs[0]

        # compute the main effects if we need to
        main_effect_values = fm.main_effects(inds) if main_effects else None

        return {
            "values": row_values / (2 * npermutations),
            "expected_values": expected_value,
            "mask_shapes": fm.mask_shapes,
            "main_effects": main_effect_values
        }

    def shap_values(self, X, npermutations=10, main_effects=False, error_bounds=False, batch_evals=True, silent=False):
        """ Legacy interface to estimate the SHAP values for a set of samples.

        Parameters
        ----------
        X : numpy.array or pandas.DataFrame or any scipy.sparse matrix
            A matrix of samples (# samples x # features) on which to explain the model's output.

        npermutations : int
            Number of times to cycle through all the features, re-evaluating the model at each step.
            Each cycle evaluates the model function 2 * (# features + 1) times on a data matrix of
            (# background data samples) rows. An exception to this is when PermutationExplainer can
            avoid evaluating the model because a feature's value is the same in X and the background
            dataset (which is common for example with sparse features).

        Returns
        -------
        For models with a single output this returns a matrix of SHAP values
        (# samples x # features). Each row sums to the difference between the model output for that
        sample and the expected value of the model output (which is stored as expected_value
        attribute of the explainer). For models with vector outputs this returns a list
        of such matrices, one for each output.
        """
        explanation = self(X, max_evals=npermutations * X.shape[1], main_effects=main_effects)
        return explanation._old_format()
# # convert dataframes
# self.dataframe_columns = None
# if str(type(X)).endswith("pandas.core.series.Series'>"):
# X = X.values
# elif str(type(X)).endswith("'pandas.core.frame.DataFrame'>"):
# self.dataframe_columns = list(X.columns)
# X = X.values
# x_type = str(type(X))
# arr_type = "'numpy.ndarray'>"
# # if sparse, convert to lil for performance
# if sp.sparse.issparse(X) and not sp.sparse.isspmatrix_lil(X):
# X = X.tolil()
# assert x_type.endswith(arr_type) or sp.sparse.isspmatrix_lil(X), "Unknown instance type: " + x_type
# assert len(X.shape) == 1 or len(X.shape) == 2, "Instance must have 1 or 2 dimensions!"
# # single instance
# if len(X.shape) == 1:
# data = X.reshape((1, X.shape[0]))
# row_phi, row_phi_min, row_phi_max, row_phi_main = self.explain(
# data, npermutations=npermutations, main_effects=main_effects, batch_evals=batch_evals,
# error_bounds=error_bounds
# )
# # vector-output
# s = row_phi.shape
# if len(s) == 2:
# outs = [np.zeros(s[0]) for j in range(s[1])]
# for j in range(s[1]):
# outs[j] = row_phi[:, j]
# return outs
# # single-output
# else:
# out = np.zeros(s[0])
# out[:] = row_phi
# return out
# # explain the whole dataset
# elif len(X.shape) == 2:
# explanations = []
# explanations_min = []
# explanations_max = []
# explanations_main = []
# for i in tqdm(range(X.shape[0]), disable=silent):
# data = X[i:i + 1, :]
# row_phi, row_phi_min, row_phi_max, row_phi_main = self.explain(
# data, npermutations=npermutations, main_effects=main_effects,
# batch_evals=batch_evals, error_bounds=error_bounds
# )
# explanations.append(row_phi)
# explanations_min.append(row_phi_min)
# explanations_max.append(row_phi_max)
# explanations_main.append(row_phi_main)
# # vector-output
# s = explanations[0].shape
# if len(s) == 2:
# outs = [np.zeros((X.shape[0], s[0])) for j in range(s[1])]
# outs_min = copy.deepcopy(outs)
# outs_max = copy.deepcopy(outs)
# outs_main = copy.deepcopy(outs)
# for i in range(X.shape[0]):
# for j in range(s[1]):
# outs[j][i] = explanations[i][:, j]
# if error_bounds:
# outs_min[j][i] = explanations_min[i][:, j]
# outs_max[j][i] = explanations_max[i][:, j]
# if main_effects:
# outs_main[j][i] = explanations_main[i][:, j]
# if error_bounds or main_effects:
# return outs, outs_min, outs_max, out_main
# else:
# return outs
# # single-output
# else:
# out = np.zeros((X.shape[0], s[0]))
# out_min = copy.deepcopy(out)
# out_max = copy.deepcopy(out)
# out_main = copy.deepcopy(out)
# for i in range(X.shape[0]):
# out[i] = explanations[i]
# if error_bounds:
# out_min[i] = explanations_min[i]
# out_max[i] = explanations_max[i]
# if main_effects:
# out_main[i] = explanations_main[i]
# if error_bounds or main_effects:
# return out, out_min, out_max, out_main
# else:
# return out
# def explain(self, incoming_instance, **kwargs):
# # convert incoming input to a standardized iml object
# instance = convert_to_instance(incoming_instance)
# match_instance_to_data(instance, self.data)
# error_bounds = True
# #assert len(self.data.groups) == self.P, "PermutationExplainer does not support feature groups!"
# # find the feature groups we will test. If a feature does not change from its
# # current value then we know it doesn't impact the model
# self.varyingInds = self.varying_groups(instance.x)
# #self.varyingFeatureGroups = [self.data.groups[i] for i in self.varyingInds]
# self.M = len(self.varyingInds)
# # find f(x)
# if self.keep_index:
# model_out = self.model.f(instance.convert_to_df())
# else:
# model_out = self.model.f(instance.x)
# if isinstance(model_out, (pd.DataFrame, pd.Series)):
# model_out = model_out.values[0]
# self.fx = model_out[0]
# if not self.vector_out:
# self.fx = np.array([self.fx])
# # if no features vary then there no feature has an effect
# if self.M == 0:
# | |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals, print_function
import io
import re
import numpy as np
from . import asdftypes
from . import block
from . import constants
from . import generic_io
from . import reference
from . import resolver
from . import util
from . import treeutil
from . import versioning
from . import yamlutil
from .tags.core.asdf import AsdfObject
class AsdfFile(versioning.VersionedMixin):
"""
The main class that represents a ASDF file.
"""
    def __init__(self, tree=None, uri=None,
                 tag_to_schema_resolver=None,
                 url_mapping=None,
                 type_index=None):
        """
        Parameters
        ----------
        tree : dict or AsdfFile, optional
            The main tree data in the ASDF file.  Must conform to the
            ASDF schema.

        uri : str, optional
            The URI for this ASDF file.  Used to resolve relative
            references against.  If not provided, will automatically
            determined from the associated file object, if possible
            and if created from `AsdfFile.read`.

        Other Parameters
        ----------------
        tag_to_schema_resolver : callable, optional
            A callback used to convert tag names into schema
            URIs.  The callable must take a string and return a string
            or `None`.  If not provided, the default
            `astropy.resolvers.TagToSchemaResolver` will be used.

        url_mapping : callable, optional
            A callback function used to map URIs to other URIs.  The
            callable must take a string and return a string or `None`.
            This is useful, for example, when a remote resource has a
            mirror on the local filesystem that you wish to use.

        type_index : pyasdf.asdftypes.AsdfTypeIndex, optional
            A type index object used to lookup custom ASDF types.  It
            must have two methods:

            - `from_custom_type`: Given an object, return the
              corresponding `pyasdf.asdftypes.AsdfType` subclass.

            - `from_yaml_tag`: Given a YAML tag as a string, return the
              corresponding `pyasdf.asdftypes.AsdfType` subclass.
        """
        # Fall back to the library-wide defaults for anything the caller
        # did not supply.
        if tag_to_schema_resolver is None:
            tag_to_schema_resolver = resolver.TAG_TO_SCHEMA_RESOLVER
        self._tag_to_schema_resolver = tag_to_schema_resolver
        if url_mapping is None:
            url_mapping = resolver.URL_MAPPING
        self._url_mapping = url_mapping
        if type_index is None:
            type_index = asdftypes.AsdfTypeIndex()
        self._type_index = type_index
        self._fd = None
        # Cache of external AsdfFiles keyed by resolved URI (see read_external).
        self._external_asdf_by_uri = {}
        self._blocks = block.BlockManager(self)
        if tree is None:
            self.tree = {}
            self._uri = uri
        elif isinstance(tree, AsdfFile):
            # Copy-construct from another AsdfFile.  The source file's URI is
            # adopted first and only replaced by `uri` after the hooks and
            # reference resolution have run — presumably so they operate in
            # the source file's context; confirm before reordering.
            self._uri = tree.uri
            self._tree = tree.tree
            self.run_modifying_hook('copy_to_new_asdf')
            self.find_references()
            self._uri = uri
        else:
            # Assigning through the `tree` property validates against the
            # ASDF schema.
            self.tree = tree
            self._uri = uri
            self.find_references()
    def __enter__(self):
        """Context-manager entry; the file itself is the managed resource."""
        return self
    def __exit__(self, type, value, traceback):
        """Context-manager exit: close all associated file handles."""
        self.close()
def close(self):
"""
Close the file handles associated with the `AsdfFile`.
"""
if self._fd:
# This is ok to always do because GenericFile knows
# whether it "owns" the file and should close it.
self._fd.close()
self._fd = None
for external in self._external_asdf_by_uri.values():
external.close()
self._external_asdf_by_uri.clear()
@property
def uri(self):
"""
Get the URI associated with the `AsdfFile`.
In many cases, it is automatically determined from the file
handle used to read or write the file.
"""
if self._uri is not None:
return self._uri
if self._fd is not None:
return self._fd._uri
return None
    @property
    def tag_to_schema_resolver(self):
        """Callable mapping YAML tag names to schema URIs (read-only)."""
        return self._tag_to_schema_resolver
    @property
    def url_mapping(self):
        """Callable mapping URIs to alternate URIs, e.g. local mirrors (read-only)."""
        return self._url_mapping
    @property
    def type_index(self):
        """The `AsdfTypeIndex` used to look up custom ASDF types (read-only)."""
        return self._type_index
    def resolve_uri(self, uri):
        """
        Resolve a (possibly relative) URI against the URI of this ASDF
        file.  May be overridden by base classes to change how URIs
        are resolved.  This does not apply any `uri_mapping` that was
        passed to the constructor.

        Parameters
        ----------
        uri : str
            An absolute or relative URI to resolve against the URI of
            this ASDF file.

        Returns
        -------
        uri : str
            The resolved URI.
        """
        # Delegates to generic_io; self.uri may be None, in which case the
        # input is returned effectively unresolved.
        return generic_io.resolve_uri(self.uri, uri)
def read_external(self, uri):
"""
Load an external ASDF file, from the given (possibly relative)
URI. There is a cache (internal to this ASDF file) that ensures
each external ASDF file is loaded only once.
Parameters
----------
uri : str
An absolute or relative URI to resolve against the URI of
this ASDF file.
Returns
-------
asdffile : AsdfFile
The external ASDF file.
"""
# For a cache key, we want to ignore the "fragment" part.
base_uri = util.get_base_uri(uri)
resolved_uri = self.resolve_uri(base_uri)
# A uri like "#" should resolve back to ourself. In that case,
# just return `self`.
if resolved_uri == '' or resolved_uri == self.uri:
return self
asdffile = self._external_asdf_by_uri.get(resolved_uri)
if asdffile is None:
asdffile = self.read(resolved_uri)
self._external_asdf_by_uri[resolved_uri] = asdffile
return asdffile
    @property
    def tree(self):
        """
        Get the tree of data in the ASDF file.

        When set, the tree will be validated against the ASDF schema.
        """
        return self._tree
    @tree.setter
    def tree(self, tree):
        # Validate against the ASDF schema before accepting the new tree.
        yamlutil.validate(tree, self)
        self._tree = AsdfObject(tree)
    def make_reference(self, path=[]):
        """
        Make a new reference to a part of this file's tree, that can be
        assigned as a reference to another tree.

        Parameters
        ----------
        path : list of str and int, optional
            The parts of the path pointing to an item in this tree.
            If omitted, points to the root of the tree.

        Returns
        -------
        reference : reference.Reference
            A reference object.

        Examples
        --------
        For the given AsdfFile ``ff``, add an external reference to the data in
        an external file::

            >>> import pyasdf
            >>> flat = pyasdf.open("http://stsci.edu/reference_files/flat.asdf")  # doctest: +SKIP
            >>> ff.tree['flat_field'] = flat.make_reference(['data'])  # doctest: +SKIP
        """
        # NOTE: the mutable default `path=[]` is safe here because it is
        # never mutated — only passed through.
        return reference.make_reference(self, path)
    @property
    def blocks(self):
        """
        Get the block manager associated with the `AsdfFile`.
        """
        return self._blocks
    def set_array_storage(self, arr, array_storage):
        """
        Set the block type to use for the given array data.

        Parameters
        ----------
        arr : numpy.ndarray
            The array to set.  If multiple views of the array are in
            the tree, only the most recent block type setting will be
            used, since all views share a single block.

        array_storage : str
            Must be one of:

            - ``internal``: The default.  The array data will be
              stored in a binary block in the same ASDF file.

            - ``external``: Store the data in a binary block in a
              separate ASDF file.

            - ``inline``: Store the data as YAML inline in the tree.
        """
        # BlockManager.__getitem__ maps an array to its (shared) block.
        self.blocks[arr].array_storage = array_storage
    def get_array_storage(self, arr):
        """
        Get the block type for the given array data.

        Parameters
        ----------
        arr : numpy.ndarray

        Returns
        -------
        array_storage : str
            One of ``internal``, ``external`` or ``inline`` (see
            `set_array_storage`).
        """
        return self.blocks[arr].array_storage
@classmethod
def _parse_header_line(cls, line):
"""
Parses the header line in a ASDF file to obtain the ASDF version.
"""
regex = (constants.ASDF_MAGIC +
b'(?P<major>[0-9]+)\.(?P<minor>[0-9]+)\.(?P<micro>[0-9]+)')
match = re.match(regex, line)
if match is None:
raise ValueError("Does not appear to be a ASDF file.")
return (int(match.group("major")),
int(match.group("minor")),
int(match.group("micro")))
    @classmethod
    def read(cls, fd, uri=None, mode='r',
             tag_to_schema_resolver=None,
             url_mapping=None,
             type_index=None,
             _get_yaml_content=False):
        """
        Open an existing ASDF file.

        Parameters
        ----------
        fd : string or file-like object
            May be a string ``file`` or ``http`` URI, or a Python
            file-like object.

        uri : string, optional
            The URI of the file.  Only required if the URI can not be
            automatically determined from `fd`.

        mode : string, optional
            The mode to open the file in.  Must be ``r`` (default) or
            ``rw``.

        Other Parameters
        ----------------
        **kwargs : extra parameters
            See `pyasdf.AsdfFile` for a description of the other
            parameters.

        Returns
        -------
        asdffile : AsdfFile
            The new AsdfFile object.
        """
        fd = generic_io.get_file(fd, mode=mode, uri=uri)
        # `self` is a freshly constructed instance: this classmethod acts as
        # an alternate constructor.
        self = cls(
            tag_to_schema_resolver=tag_to_schema_resolver,
            url_mapping=url_mapping,
            type_index=type_index)
        self._fd = fd
        try:
            header_line = fd.read_until(b'\r?\n', "newline", include=True)
        except ValueError:
            raise ValueError("Does not appear to be a ASDF file.")
        self.version = cls._parse_header_line(header_line)
        # Peek at the next 4 bytes to decide what follows the header: a YAML
        # tree, binary blocks, or nothing (EOF).
        yaml_token = fd.read(4)
        yaml_content = b''
        has_blocks = False
        if yaml_token == b'%YAM':
            # The yaml content is read now, but we parse it after finding
            # all of the blocks, so that arrays can be resolved to their
            # blocks immediately.
            yaml_content = yaml_token + fd.read_until(
                constants.YAML_END_MARKER_REGEX, 'End of YAML marker',
                include=True)
            has_blocks = fd.seek_until(constants.BLOCK_MAGIC, include=True)
        elif yaml_token == constants.BLOCK_MAGIC:
            has_blocks = True
        elif yaml_token != b'':
            raise IOError("ASDF file appears to contain garbage after header.")
        # For testing: just return the raw YAML content
        if _get_yaml_content:
            fd.close()
            return yaml_content
        if has_blocks:
            # past_magic=True: the block magic token was already consumed
            # above while detecting the file layout.
            self._blocks.read_internal_blocks(fd, past_magic=True)
        if len(yaml_content):
            tree = yamlutil.load_tree(yaml_content, self)
            self.run_hook('post_read')
            self._tree = tree
        else:
            self._tree = {}
        return self
    def _write_tree(self, tree, fd, pad_blocks):
        # Header line: magic token + version string + newline.
        fd.write(constants.ASDF_MAGIC)
        fd.write(self.version_string.encode('ascii'))
        fd.write(b'\n')
        if len(tree):
            yamlutil.dump_tree(tree, fd, self)
        if pad_blocks:
            # Fast-forward over padding after the YAML — presumably so the
            # tree can later grow in place without moving the binary
            # blocks; confirm against util.calculate_padding.
            padding = util.calculate_padding(
                fd.tell(), pad_blocks, fd.block_size)
            fd.fast_forward(padding)
    def _pre_write(self, fd, exploded):
        # Exploded (external-block) writing stores blocks in sibling files,
        # which requires knowing where "here" is.
        if exploded and fd.uri is None:
            raise ValueError(
                "Can not write an exploded file without knowing its URI.")
        if len(self._tree):
            self.run_hook('pre_write')
        # This is where we'd do some more sophisticated block
        # reorganization, if necessary
        self._blocks.finalize(self, exploded=exploded)
def | |
# TODO Weak references!
import weakref, traceback
import wx
class MiscEventSourceMixin:
    """
    Mixin that equips an object with a lazily created MiscEvent and with
    convenience methods to fire property/key events through clones of it.
    """
    def __init__(self):
        # Name-mangled to _MiscEventSourceMixin__miscevent, exactly as the
        # explicit attribute accesses elsewhere expect.
        self.__miscevent = None

    def getMiscEvent(self):
        """Return this object's MiscEvent, creating it on first use."""
        ev = getattr(self, "_MiscEventSourceMixin__miscevent", None)
        if not ev:
            ev = MiscEvent(self)
            self.__miscevent = ev
        return ev

    def removeMiscEvent(self):
        """Drop the cached MiscEvent (a later getMiscEvent() recreates it)."""
        if hasattr(self, "_MiscEventSourceMixin__miscevent"):
            del self.__miscevent

    def fireMiscEventProps(self, props, first=None, shareListenerList=False):
        """
        props -- dictionary {key: value} with properties
        first -- listener whose miscEventHappened() is called before all
                other listeners, or None
        Returns the fired clone event.
        """
        clone = self.getMiscEvent().createCloneAddProps(
                props, shareListenerList=shareListenerList)
        return clone.processSend(first)

    def fireMiscEventKeys(self, keys, first=None, shareListenerList=False):
        """
        keys -- sequence of key strings (each mapped to True)
        first -- listener whose miscEventHappened() is called before all
                other listeners, or None
        Returns the fired clone event.
        """
        clone = self.getMiscEvent().createCloneAddKeys(
                keys, shareListenerList=shareListenerList)
        return clone.processSend(first)
class ListenerList(object):
    """
    Container for event listeners.  Entries are weak references by default
    (strong references on request) and may be invalidated (set to None)
    while the list is iterated; dead entries are physically removed later
    by cleanDeadRefs() once no iteration is in progress.
    """
    __slots__ = ("__weakref__", "listeners", "userCount", "cleanupFlag",
            "parentList")

    def __init__(self):
        self.listeners = []       # weakref.ref entries, strong refs, or None
        self.userCount = 0        # number of iterations currently in progress
        self.cleanupFlag = False  # True -> dead entries await removal
        self.parentList = None    # Don't know yet what it's good for

    def clone(self):
        """Return a copy with its own list object and a reset user count."""
        twin = ListenerList()
        twin.listeners = self.listeners[:]
        twin.userCount = 0
        twin.parentList = self
        if self.cleanupFlag:
            twin.cleanDeadRefs()
        return twin

    def addListener(self, listener, isWeak=True):
        """
        isWeak -- Iff true, store weak reference to listener instead
                of listener itself
        """
        if not isWeak:
            self.listeners.append(listener)
        else:
            self.listeners.append(weakref.ref(listener))

    def removeListener(self, listener):
        """Remove (or invalidate, during iteration) the given listener."""
        if self.userCount != 0:
            # List is being iterated -> only null the entry in place.
            pos = self.findListener(listener)
            if pos != -1:
                self.invalidateObjectAt(pos)
            return
        # No users -> manipulate list directly.
        try:
            self.listeners.remove(weakref.ref(listener))
        except ValueError:
            try:
                self.listeners.remove(listener)
            except ValueError:
                # Wasn't in the list
                pass

    def findListener(self, listener):
        """Return the index of listener (weak or strong entry) or -1."""
        for candidate in (weakref.ref(listener), listener):
            try:
                return self.listeners.index(candidate)
            except ValueError:
                continue
        return -1

    def hasListener(self, listener):
        return self.findListener(listener) != -1

    def setListeners(self, listeners):
        self.listeners = listeners

    def incListenerUser(self):
        """Mark the start of an iteration; returns the raw listener list."""
        self.userCount += 1
        return self.listeners

    def decListenerUser(self):
        """Mark the end of an iteration; last user runs deferred cleanup."""
        if self.userCount > 0:
            self.userCount -= 1
            if self.userCount == 0 and self.cleanupFlag:
                self.cleanDeadRefs()
                self.cleanupFlag = False

    def setCleanupFlag(self, value=True):
        self.cleanupFlag = value

    @staticmethod
    def getActualObject(lref):
        """Dereference an entry: None stays None, weakrefs are resolved."""
        if lref is None:
            return None
        if isinstance(lref, weakref.ReferenceType):
            return lref()  # Retrieve real object from weakref object
        return lref

    def getObjectAt(self, i):
        """Return the live listener at index i, or None (flagging cleanup)."""
        entry = self.listeners[i]
        if entry is None:
            self.cleanupFlag = True
            return None
        if not isinstance(entry, weakref.ReferenceType):
            return entry
        target = entry()
        if target is None:
            self.cleanupFlag = True
        return target

    def invalidateObjectAt(self, i):
        """
        Sets listener at index i to None (invalid) and flags list for
        cleaning.
        """
        self.listeners[i] = None
        self.cleanupFlag = True

    def cleanDeadRefs(self):
        """
        Remove references to already deleted objects.
        """
        # In-place slice assignment keeps the list object's identity, which
        # matters because incListenerUser() hands out the raw list.
        self.listeners[:] = [entry for entry in self.listeners
                if self.getActualObject(entry) is not None]

    def __len__(self):
        return len(self.listeners)

    def __repr__(self):
        return "<MiscEvent.ListenerList " + hex(id(self)) + " " + \
                repr(self.listeners) + ">"
class MiscEvent(object):
    """
    Event object carrying a source, a property dictionary and a listener
    list.  The original event only manages listeners; to dispatch data,
    create a clone via createClone()/createCloneAddProps()/
    createCloneAddKeys() and call processSend() on the clone.
    """
    __slots__ = ("__weakref__", "listenerList", "source", "properties", "parent",
            "activeListenerIndex")

    def __init__(self, source = None):
        self.listenerList = ListenerList()
        self.source = source
        self.properties = None   # dict on clones only; None on originals
        self.parent = None       # event this clone was created from, or None
        # Index into self.listeners which listener is currently called
        # needed for noChildrenForMe().
        self.activeListenerIndex = -1

    def __repr__(self):
        return "<MiscEvent.MiscEvent(%s, %s, %s)>" % (self.source, self.properties,
                self.listenerList)

    def getSource(self):
        return self.source

    def setSource(self, source):
        self.source = source

    def getListenerList(self):
        return self.listenerList

    def getMiscEvent(self):
        return self

    def get(self, key, default = None):
        """
        Return value for specified key or default if not found.
        Be careful: The value itself may be None.
        """
        return self.properties.get(key, default)

    def has_key(self, key):
        """
        Has the event the specified key?
        """
        # 'in' instead of dict.has_key(): has_key() is deprecated in
        # Python 2 and removed in Python 3. This method itself stays
        # because it is part of the public interface.
        return key in self.properties

    def has_key_in(self, keyseq):
        """
        Returns true iff it has at least one key in the sequence of keys keyseq
        """
        for key in keyseq:
            if self.has_key(key):
                return True
        return False

    def getParent(self):
        """
        The MiscEvent which was called to fire this clone. If it returns
        None this is not a clone.
        """
        return self.parent

    def clone(self, shareListenerList=False):
        """
        Normally you shouldn't call this method directly,
        call createClone() instead
        """
        result = MiscEvent()
        if shareListenerList:
            result.listenerList = self.listenerList
        else:
            result.listenerList = self.listenerList.clone()
        if self.properties is not None:
            result.properties = self.properties.copy()
        return result

    # A MiscEvent manages the listener list itself.
    def addListener(self, listener, isWeak=True):
        """
        isWeak -- Iff true, store weak reference to listener instead
                of listener itself
        """
        return self.listenerList.addListener(listener, isWeak)

    def removeListener(self, listener):
        return self.listenerList.removeListener(listener)

    def hasListener(self, listener):
        return self.listenerList.hasListener(listener)

    def setListeners(self, listeners):
        return self.listenerList.setListeners(listeners)

    def setListenerList(self, listenerList):
        self.listenerList = listenerList

    def put(self, key, value = None):
        """
        Add a key-value pair to the internal property dictionary.
        Can only be called on a clone, not on an original MiscEvent.
        Returns self, so calls can be chained:
        event = event.put("a", a).put("foo", bar)
        Raises RuntimeError if this is not a clone.
        """
        if self.getParent() is None:
            # RuntimeError instead of the Python-2-only StandardError; on
            # Python 2 RuntimeError subclasses StandardError, so existing
            # callers catching StandardError keep working.
            raise RuntimeError("This must be a clone")  # TODO Create/Find a better exception
        self.properties[key] = value
        return self

    def cleanDeadRefs(self):
        """
        Remove references to already deleted objects. Mainly called by processSend
        to clean the parent event if a child finds a deadref.
        """
        self.listenerList.cleanDeadRefs()

    def processSend(self, first=None):
        """
        Called on the clone to dispatch itself to first, then to all listeners.
        Can only be called on a clone, not on an original MiscEvent.
        first -- listener the event dispatches to before all remaining
                listeners; None is ignored.
        Raises RuntimeError if this is not a clone.
        """
        if self.getParent() is None:
            raise RuntimeError("This must be a clone")  # TODO Create/Find a better exception
        if first is not None:
            first.miscEventHappened(self)
        self.listenerList.incListenerUser()
        try:
            i = 0
            while i < len(self.listenerList):
                l = self.listenerList.getObjectAt(i)
                if l is None:
                    i += 1
                    continue
                self.activeListenerIndex = i
                try:
                    l.miscEventHappened(self)
                except wx.PyDeadObjectError:
                    # The object is a wxPython object for which the C++ part was
                    # deleted already, so remove object from listener list.
                    self.listenerList.invalidateObjectAt(i)
                except:
                    # A failing listener must not stop dispatch to the rest.
                    traceback.print_exc()
                i += 1
        finally:
            self.listenerList.decListenerUser()
            self.activeListenerIndex = -1
        return self

    def createClone(self, shareListenerList=False):
        """
        Creates a clone with the appropriate data, so dispatching can be done later.
        Some methods can be called only on a cloned MiscEvent.
        To add properties, use the put() method.
        shareListenerList -- if True the clone shares this event's listener
                list object instead of copying it
        """
        event = self.clone(shareListenerList=shareListenerList)
        if event.properties is None:
            event.properties = {}
        event.source = self.source
        event.parent = self
        return event

    def getProps(self):
        """
        Return properties dictionary. The returned dictionary should not
        be altered.
        """
        return self.properties

    def addProps(self, addprops):
        """
        Add/update properties of the event
        addprops -- dictionary with additional properties
        Returns self.
        """
        self.properties.update(addprops)
        return self

    def addKeys(self, addkeys):
        """
        Add/update keys of the event
        addkeys -- sequence with additional keys for properties
        Returns self.
        """
        for k in addkeys:
            self.properties[k] = True
        return self

    def createCloneAddProps(self, addprops, shareListenerList=False):
        """
        Creates a clone with the appropriate data, so dispatching can be done later.
        Some methods can be called only on a cloned MiscEvent.
        addprops -- dictionary with additional properties
        """
        event = self.createClone(shareListenerList=shareListenerList)
        event.properties.update(addprops)
        return event

    def createCloneAddKeys(self, addkeys, shareListenerList=False):
        """
        Creates a clone with the appropriate data, so dispatching can be done later.
        Some methods can be called only on a cloned MiscEvent.
        addkeys -- sequence with additional keys for properties
        """
        event = self.createClone(shareListenerList=shareListenerList)
        for k in addkeys:
            event.properties[k] = True
        return event
# def noChildrenForMe():
# """
# Called by a listener to ensure that it doesn't get any child events
# of this event
# """
# if self.activeListenerIndex == -1:
# # TODO Create/Find a better exception
# raise StandardError("Must be called during processing of an event")
#
# self.listeners[self.activeListenerIndex] = None
# TODO Derivation from MiscEvent is not elegant
class ProxyMiscEvent(MiscEvent):
    """
    This specialized MiscEvent registers as listener to a list of other
    MiscEvents and resends any events sent by them.
    """
    __slots__ = ("watchedEvents",)

    def __init__(self, source=None):
        MiscEvent.__init__(self, source)
        # Tuple of MiscEvents currently watched/re-dispatched.
        # NOTE(review): presumably managed by methods not shown here -- confirm.
        self.watchedEvents = ()
| |
<reponame>avinetworks/servicemesh
import copy
import logging
import os
import yaml
import avi.migrationtools.f5_converter.converter_constants as final
from avi.migrationtools.f5_converter.conversion_util import F5Util
from avi.migrationtools.avi_migration_utils import update_count
LOG = logging.getLogger(__name__)
# Module-level running tally, mutable so converter code can update it in place.
ssl_count = {'count': 0}
# Creating f5 object for util library.
conv_utils = F5Util()
class ProfileConfigConv(object):
    """
    Base converter mapping F5 'profile' configuration objects to Avi
    objects (SSL/PKI/Application/Network profiles, keys and certificates).
    Version-specific subclasses supply the attribute tables and implement
    convert_profile().
    """
    @classmethod
    def get_instance(cls, version, f5_profile_attributes,
                     object_merge_check, prefix, keypassphrase):
        """
        :param version: version of f5 instance
        :param f5_profile_attributes: yaml attribute file for object
        :param object_merge_check: Flag for object merge
        :param prefix: prefix for objects
        :param keypassphrase: path of keypassphrase
        :return: object of respective f5 version object.
        """
        # NOTE(review): self-assignment below is a no-op left in the source.
        f5_profile_attributes = f5_profile_attributes
        if version == '10':
            return ProfileConfigConvV10(
                f5_profile_attributes, object_merge_check, prefix,
                keypassphrase)
        if version in ['11', '12']:
            return ProfileConfigConvV11(
                f5_profile_attributes, object_merge_check, prefix,
                keypassphrase)
        # NOTE(review): any other version falls through and returns None.

    # Attribute key used for profile inheritance; overridden by subclasses
    # (e.g. "defaults-from" for v11).
    default_key = None
    # Default cipher suite string applied to converted SSL profiles.
    ciphers = 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-' \
              'ECDSA-AES256-SHA:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-' \
              'AES128-SHA256:ECDHE-ECDSA-AES256-SHA384:AES128-GCM-SHA256:' \
              'AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:' \
              'AES256-SHA:DES-CBC3-SHA'

    def convert_profile(self, profile, key, f5_config, profile_config,
                        avi_config, input_dir, user_ignore, tenant_ref,
                        key_and_cert_mapping_list, merge_object_mapping,
                        sys_dict):
        """Convert a single profile; implemented by version subclasses.
        The base implementation intentionally does nothing."""
        pass

    def convert(self, f5_config, avi_config, input_dir, user_ignore,
                tenant_ref, cloud_ref, merge_object_mapping, sys_dict):
        """
        Convert all supported profiles found in f5_config into avi_config.
        :param f5_config: parsed f5 config dict.
        :param avi_config: avi config dict for converted avi conversion.
        :param input_dir: Location of cert and external monitor script files
        :param user_ignore: Ignore config defined by user
        :param tenant_ref: tenant ref for avi objects
        :param cloud_ref: cloud ref for avi objects
        :param merge_object_mapping: merged object dict for merging objects
        :param sys_dict: baseline objects
        :return:
        """
        profile_config = f5_config.get("profile", {})
        avi_config["StringGroup"] = []
        avi_config['HTTPPolicySet'] = []
        avi_config['OneConnect'] = []
        key_and_cert_mapping_list = []
        persistence = f5_config.get("persistence", None)
        if not persistence:
            f5_config['persistence'] = {}
        avi_config['UnsupportedProfiles'] = []
        # Python 2 print statement -- this module targets Python 2.
        print "\nConverting Profiles ..."
        # Added variable to get total object count.
        progressbar_count = 0
        total_size = len(profile_config.keys())
        for key in profile_config.keys():
            progressbar_count += 1
            profile_type = None
            name = None
            try:
                # Config keys look like "<type> <name>", e.g. "http my-prof".
                profile_type, name = key.split(" ")
                tenant, name = conv_utils.get_tenant_ref(name)
                if not tenant_ref == 'admin':
                    tenant = tenant_ref
                if profile_type not in self.supported_types:
                    msg = ("Skipped not supported profile: %s of type: %s"
                           % (name, profile_type))
                    LOG.warning(msg)
                    conv_utils.add_status_row('profile', profile_type, name,
                                              final.STATUS_SKIPPED, msg)
                    avi_config['UnsupportedProfiles'].append(name)
                    continue
                # Added prefix for objects
                if self.prefix:
                    name = self.prefix + '-' + name
                LOG.debug("Converting profile: %s" % name)
                profile = profile_config[key]
                if not profile:
                    LOG.warn('Empty config for profile %s Skipping the config'
                             % name)
                    conv_utils.add_status_row(
                        'profile', profile_type, name,
                        final.STATUS_NOT_APPLICABLE, 'Empty config')
                    continue
                # Flatten the defaults-from inheritance chain before
                # handing the profile to the version-specific converter.
                profile = self.update_with_default_profile(
                    profile_type, profile, profile_config, name)
                u_ignore = user_ignore.get('profile', {})
                self.convert_profile(
                    profile, key, f5_config, profile_config, avi_config,
                    input_dir, u_ignore, tenant, key_and_cert_mapping_list,
                    merge_object_mapping, sys_dict)
                LOG.debug("Conversion successful for profile: %s" % name)
            except:
                # Deliberately broad: one bad profile must not abort the
                # whole conversion run; the failure is counted and logged.
                update_count('error')
                LOG.error("Failed to convert profile: %s" % key, exc_info=True)
                if name:
                    conv_utils.add_status_row('profile', profile_type, name,
                                              final.STATUS_ERROR)
                else:
                    conv_utils.add_status_row('profile', key, key,
                                              final.STATUS_ERROR)
            # Added call to check progress.
            msg = "Profile conversion started..."
            conv_utils.print_progress_bar(progressbar_count, total_size, msg,
                                          prefix='Progress', suffix='')
        count = len(avi_config["SSLProfile"])
        count += len(avi_config["PKIProfile"])
        count += len(avi_config["ApplicationProfile"])
        count += len(avi_config["NetworkProfile"])
        LOG.debug("Converted %s profiles" % count)
        f5_config.pop("profile")
        del key_and_cert_mapping_list

    def update_with_default_profile(self, profile_type, profile,
                                    profile_config, profile_name):
        """
        Profiles can have inheritance used by attribute defaults-from in F5
        configuration this method recursively gets all the attributes from the
        default objects and forms complete object
        :param profile_type: type of profile
        :param profile: current profile object
        :param profile_config: F5 profile config dict
        :param profile_name: Name of profile
        :return: Complete profile with updated attributes from defaults
        """
        parent_name = profile.get(self.default_key, None)
        if parent_name and '/' in parent_name:
            # Parent may be referenced by partition path; keep the last part.
            parent_name = parent_name.split('/')[-1]
        if parent_name and profile_name != parent_name:
            parent_profile = profile_config.get(profile_type + " " +
                                                parent_name, None)
            if parent_profile:
                # Recurse so the whole chain is flattened first, then overlay
                # this profile's own attributes on a deep copy of the parent.
                parent_profile = self.update_with_default_profile(
                    profile_type, parent_profile, profile_config, parent_name)
                parent_profile = copy.deepcopy(parent_profile)
                parent_profile.update(profile)
                profile = parent_profile
        return profile

    def update_key_cert_obj(self, name, key_file_name, cert_file_name,
                            input_dir, tenant, avi_config, converted_objs,
                            default_profile_name, key_and_cert_mapping_list,
                            merge_object_mapping, sys_dict):
        """
        This method creates the cert object; if the certificate is not
        usable (missing, expired or passphrase-protected without a known
        passphrase) a dummy self-signed certificate is created instead.
        :param name: name of certificate.
        :param key_file_name: name of keyfile of cert
        :param cert_file_name: name of cert file
        :param input_dir: location of cert and key
        :param tenant: tenant name
        :param avi_config: converted avi config dict
        :param converted_objs: list of converted object profile
        :param default_profile_name: name of default profile name.
        :param key_and_cert_mapping_list: list of key and cert
        :param merge_object_mapping: merged object dict for merging objects
        :param sys_dict: baseline objects
        :return:
        """
        # Skip if this key/cert file pair was converted earlier in the run.
        cert_name = [cert['name'] for cert in key_and_cert_mapping_list if
                     cert['key_file_name'] == key_file_name and
                     cert['cert_file_name'] == cert_file_name]
        if cert_name:
            LOG.warning(
                'SSL key and Certificate is already exist for %s and %s is %s' %
                (key_file_name, cert_file_name, cert_name[0]))
            return
        folder_path = input_dir + os.path.sep
        key = None
        cert = None
        if key_file_name and cert_file_name:
            # Removed / from key_file_name to get name of file.
            if '/' in key_file_name:
                key_file_name = key_file_name.split('/')[-1]
            # Removed / from cert_file_name to get name of file.
            if '/' in cert_file_name:
                cert_file_name = cert_file_name.split('/')[-1]
            key = conv_utils.upload_file(folder_path + key_file_name)
            cert = conv_utils.upload_file(folder_path + cert_file_name)
        is_key_protected = False
        if key:
            # Check if key is passphrase protected or not
            is_key_protected = conv_utils.is_certificate_key_protected(
                input_dir + os.path.sep + key_file_name)
        if cert and key:
            # Flag to check expiry date of certificate. if expired then
            # create dummy certificate.
            if not conv_utils.check_certificate_expiry(input_dir,
                                                       cert_file_name):
                cert, key = None, None
        key_passphrase = None
        # Get the key passphrase for key_file
        if is_key_protected and self.f5_passphrase_keys:
            key_passphrase = self.f5_passphrase_keys.get(key_file_name, None)
        if is_key_protected and not key_passphrase:
            # Protected key without a known passphrase is unusable.
            key = None
        if not key or not cert:
            key, cert = conv_utils.create_self_signed_cert()
            name += '-dummy'
            LOG.warning('Create self cerificate and key for : %s' % name)
        ssl_kc_obj = None
        if key and cert:
            cert = {"certificate": cert}
            ssl_kc_obj = {
                'name': name,
                'tenant_ref': conv_utils.get_object_ref(tenant, 'tenant'),
                'key': key,
                'certificate': cert,
                'type': 'SSL_CERTIFICATE_TYPE_VIRTUALSERVICE'
            }
            if key_passphrase:
                ssl_kc_obj['key_passphrase'] = key_passphrase
        if ssl_kc_obj:
            # Remember the file pair so duplicates are skipped next time.
            cert_obj = {'key_file_name': key_file_name,
                        'cert_file_name': cert_file_name,
                        'name': name
                        }
            key_and_cert_mapping_list.append(cert_obj)
            LOG.info('Added new SSL key and certificate for %s' % name)
        if ssl_kc_obj:
            if self.object_merge_check:
                # Deduplicate real certs via merge; dummies are always added.
                if 'dummy' not in ssl_kc_obj['name']:
                    conv_utils.update_skip_duplicates(ssl_kc_obj,
                        avi_config['SSLKeyAndCertificate'], 'ssl_cert_key',
                        converted_objs, name, default_profile_name,
                        merge_object_mapping, None, self.prefix, sys_dict[
                            'SSLKeyAndCertificate'])
                else:
                    converted_objs.append({'ssl_cert_key': ssl_kc_obj})
                    avi_config['SSLKeyAndCertificate'].append(ssl_kc_obj)
                self.certkey_count += 1
            else:
                converted_objs.append({'ssl_cert_key': ssl_kc_obj})
                avi_config['SSLKeyAndCertificate'].append(ssl_kc_obj)

    def update_ca_cert_obj(self, name, ca_cert_file_name, input_dir, tenant,
                           avi_config, converted_objs, merge_object_mapping,
                           sys_dict):
        """
        This method creates the CA cert object; if the certificate is not
        present at the given location (or is expired) a dummy self-signed
        certificate is created instead.
        :param name: name of certificate.
        :param ca_cert_file_name: name of the CA certificate file
        :param input_dir: location of cert and key
        :param tenant: tenant name
        :param avi_config: converted avi config dict
        :param converted_objs: list of converted object profile
        :param merge_object_mapping: merged object dict for merging objects
        :param sys_dict: baseline objects
        :return:
        """
        cert_name = [cert['name'] for cert in avi_config['SSLKeyAndCertificate']
                     if cert['name'] == name and
                     cert['type'] == 'SSL_CERTIFICATE_TYPE_CA']
        if cert_name:
            LOG.warning(
                'SSL ca cert is already exist for %s is %s' % (
                    ca_cert_file_name, cert_name[0]))
            return
        folder_path = input_dir + os.path.sep
        ca_cert = None
        # Removed / from cert_file_name to get name of file.
        if ca_cert_file_name:
            if '/' in ca_cert_file_name:
                ca_cert_file_name = ca_cert_file_name.split('/')[-1]
            if ':' in ca_cert_file_name:
                ca_cert_file_name = ca_cert_file_name.split(':')[-1]
            ca_cert = conv_utils.upload_file(folder_path + ca_cert_file_name)
        if ca_cert:
            # Discard the certificate if it is already expired.
            if not conv_utils.check_certificate_expiry(
                    input_dir, ca_cert_file_name):
                ca_cert = None
        if not ca_cert:
            key, ca_cert = conv_utils.create_self_signed_cert()
            name += '-dummy'
            LOG.warning('Create self cerificate and key for : %s' % name)
        ca_cert_obj = None
        # NOTE(review): cert_name is reassigned here but never used below.
        cert_name = name
        if ca_cert_file_name and '.crt' in ca_cert_file_name:
            if ':' in ca_cert_file_name:
                ca_cert_file_name = ca_cert_file_name.split(':')[-1]
            ca_cert_file_name = '%s.crt' % ca_cert_file_name.split('.crt')[0]
        if not ca_cert_file_name:
            ca_cert_file_name = name
        if ca_cert:
            cert = {"certificate": ca_cert}
            ca_cert_obj = {
                'name': ca_cert_file_name,
                'tenant_ref': conv_utils.get_object_ref(tenant, 'tenant'),
                'certificate': cert,
                'type': 'SSL_CERTIFICATE_TYPE_CA'
            }
            LOG.info('Added new ca certificate for %s' % name)
        if ca_cert_obj and self.object_merge_check:
            # Deduplicate real CA certs via merge; dummies are added as-is.
            if 'dummy' not in ca_cert_obj['name']:
                conv_utils.update_skip_duplicates(
                    ca_cert_obj, avi_config['SSLKeyAndCertificate'],
                    'ssl_cert_key', converted_objs, name, None,
                    merge_object_mapping, None, self.prefix,
                    sys_dict['SSLKeyAndCertificate'])
            else:
                converted_objs.append({'ssl_cert_key': ca_cert_obj})
                avi_config['SSLKeyAndCertificate'].append(ca_cert_obj)
            self.certkey_count += 1
        else:
            converted_objs.append({'ssl_cert_key': ca_cert_obj})
            avi_config['SSLKeyAndCertificate'].append(ca_cert_obj)
class ProfileConfigConvV11(ProfileConfigConv):
def __init__(self, f5_profile_attributes, object_merge_check, prefix,
keypassphrase):
"""
:param f5_profile_attributes: f5 profile attributes from yaml file.
:param object_merge_check: flag for merging objects
:param prefix: prefix for objects
:param keypassphrase: keypassphrase yaml file location
"""
self.supported_types = \
f5_profile_attributes['Profile_supported_types']
self.ignore_for_defaults = \
f5_profile_attributes['Profile_ignore_for_defaults']
self.default_key = "defaults-from"
self.na_ssl = f5_profile_attributes['Profile_na_ssl']
self.indirect_ssl = f5_profile_attributes['Profile_indirect_ssl']
self.supported_ssl = f5_profile_attributes['Profile_supported_ssl']
self.na_http = f5_profile_attributes['Profile_na_http']
self.supported_http = f5_profile_attributes['Profile_supported_http']
self.indirect_http = f5_profile_attributes['Profile_indirect_http']
self.na_dns = f5_profile_attributes['Profile_na_dns']
self.supported_dns = f5_profile_attributes['Profile_supported_dns']
self.indirect_dns = f5_profile_attributes['Profile_indirect_dns']
self.supported_hc = f5_profile_attributes['Profile_supported_hc']
| |
will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:rtype: FileProperties
.. admonition:: Example:
.. literalinclude:: ../samples/datalake_samples_upload_download.py
:start-after: [START get_file_properties]
:end-before: [END get_file_properties]
:language: python
:dedent: 4
:caption: Getting the properties for a file.
"""
return self._get_path_properties(cls=deserialize_file_properties, **kwargs) # pylint: disable=protected-access
def set_file_expiry(self, expiry_options, # type: str
expires_on=None, # type: Optional[Union[datetime, int]]
**kwargs):
# type: (str, Optional[Union[datetime, int]], **Any) -> None
"""Sets the time a file will expire and be deleted.
:param str expiry_options:
Required. Indicates mode of the expiry time.
Possible values include: 'NeverExpire', 'RelativeToCreation', 'RelativeToNow', 'Absolute'
:param datetime or int expires_on:
The time to set the file to expiry.
When expiry_options is RelativeTo*, expires_on should be an int in milliseconds.
If the type of expires_on is datetime, it should be in UTC time.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:rtype: None
"""
try:
expires_on = convert_datetime_to_rfc1123(expires_on)
except AttributeError:
expires_on = str(expires_on)
self._datalake_client_for_blob_operation.path \
.set_expiry(expiry_options, expires_on=expires_on, **kwargs) # pylint: disable=protected-access
def _upload_options( # pylint:disable=too-many-statements
self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]]
length=None, # type: Optional[int]
**kwargs
):
# type: (...) -> Dict[str, Any]
encoding = kwargs.pop('encoding', 'UTF-8')
if isinstance(data, six.text_type):
data = data.encode(encoding) # type: ignore
if length is None:
length = get_length(data)
if isinstance(data, bytes):
data = data[:length]
if isinstance(data, bytes):
stream = BytesIO(data)
elif hasattr(data, 'read'):
stream = data
elif hasattr(data, '__iter__'):
stream = IterStreamer(data, encoding=encoding)
else:
raise TypeError("Unsupported data type: {}".format(type(data)))
validate_content = kwargs.pop('validate_content', False)
content_settings = kwargs.pop('content_settings', None)
metadata = kwargs.pop('metadata', None)
max_concurrency = kwargs.pop('max_concurrency', 1)
kwargs['properties'] = add_metadata_headers(metadata)
kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None))
kwargs['modified_access_conditions'] = get_mod_conditions(kwargs)
if content_settings:
kwargs['path_http_headers'] = get_path_http_headers(content_settings)
kwargs['stream'] = stream
kwargs['length'] = length
kwargs['validate_content'] = validate_content
kwargs['max_concurrency'] = max_concurrency
kwargs['client'] = self._client.path
return kwargs
def upload_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]]
length=None, # type: Optional[int]
overwrite=False, # type: Optional[bool]
**kwargs):
# type: (...) -> Dict[str, Any]
"""
Upload data to a file.
:param data: Content to be uploaded to file
:param int length: Size of the data in bytes.
:param bool overwrite: to overwrite an existing file or not.
:keyword ~azure.storage.filedatalake.ContentSettings content_settings:
ContentSettings object used to set path properties.
:keyword metadata:
Name-value pairs associated with the blob as metadata.
:paramtype metadata: dict(str, str)
:keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease:
Required if the blob has an active lease. Value can be a DataLakeLeaseClient object
or the lease ID as a string.
:keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
When creating a file or directory and the parent folder does not have a default ACL,
the umask restricts the permissions of the file or directory to be created.
The resulting permission is given by p & ^u, where p is the permission and u is the umask.
For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
The umask must be specified in 4-digit octal notation (e.g. 0766).
:keyword str permissions: Optional and only valid if Hierarchical Namespace
is enabled for the account. Sets POSIX access permissions for the file
owner, the file owning group, and others. Each class may be granted
read, write, or execute permission. The sticky bit is also supported.
Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
supported.
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:keyword int chunk_size:
The maximum chunk size for uploading a file in chunks.
Defaults to 100*1024*1024, or 100MB.
:return: response dict (Etag and last modified).
"""
options = self._upload_options(
data,
length=length,
overwrite=overwrite,
**kwargs)
return upload_datalake_file(**options)
@staticmethod
def _append_data_options(data, offset, length=None, **kwargs):
# type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any]
if isinstance(data, six.text_type):
data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore
if length is None:
length = get_length(data)
if length is None:
length, data = read_length(data)
if isinstance(data, bytes):
data = data[:length]
access_conditions = get_access_conditions(kwargs.pop('lease', None))
options = {
'body': data,
'position': offset,
'content_length': length,
'lease_access_conditions': access_conditions,
'validate_content': kwargs.pop('validate_content', False),
'timeout': kwargs.pop('timeout', None),
'cls': return_response_headers}
options.update(kwargs)
return options
def append_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]]
offset, # type: int
length=None, # type: Optional[int]
**kwargs):
# type: (...) -> Dict[str, Union[str, datetime, int]]
"""Append data to the file.
:param data: Content to be appended to file
:param offset: start position of the data to be appended to.
:param length: Size of the data in bytes.
:keyword bool validate_content:
If true, calculates an MD5 hash of the block content. The storage
service checks the hash of the content that has arrived
with the hash that was sent. This is primarily valuable for detecting
bitflips on the wire if using http instead of https as https (the default)
will already validate. Note that this MD5 hash is not stored with the
file.
:keyword lease:
Required if the file has an active lease. Value can be a DataLakeLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
:return: dict of the response header
.. admonition:: Example:
.. literalinclude:: ../samples/datalake_samples_upload_download.py
:start-after: [START append_data]
:end-before: [END append_data]
:language: python
:dedent: 4
:caption: Append data to the file.
"""
options = self._append_data_options(
data,
offset,
length=length,
**kwargs)
try:
return self._client.path.append_data(**options)
except HttpResponseError as error:
process_storage_error(error)
    @staticmethod
    def _flush_data_options(offset, content_settings=None, retain_uncommitted_data=False, **kwargs):
        # type: (int, Optional[ContentSettings], bool, **Any) -> Dict[str, Any]
        """Build the keyword-argument dict for the flush (commit) REST call.

        :param int offset: Length of the file after the commit; used as the
            flush position.
        :param content_settings: Optional HTTP headers (content type, etc.)
            applied to the committed file.
        :param bool retain_uncommitted_data: If True, uncommitted data is kept
            after the flush completes.
        :return: Options dict consumed by the generated client's flush call.
        """
        access_conditions = get_access_conditions(kwargs.pop('lease', None))
        # NOTE: get_mod_conditions reads (does not pop) the if-modified kwargs;
        # any leftovers are merged into options below.
        mod_conditions = get_mod_conditions(kwargs)
        path_http_headers = None
        if content_settings:
            path_http_headers = get_path_http_headers(content_settings)
        options = {
            'position': offset,
            # A flush commits already-uploaded data, so no body is sent.
            'content_length': 0,
            'path_http_headers': path_http_headers,
            'retain_uncommitted_data': retain_uncommitted_data,
            'close': kwargs.pop('close', False),
            'lease_access_conditions': access_conditions,
            'modified_access_conditions': mod_conditions,
            'timeout': kwargs.pop('timeout', None),
            'cls': return_response_headers}
        options.update(kwargs)
        return options
def flush_data(self, offset, # type: int
retain_uncommitted_data=False, # type: Optional[bool]
**kwargs):
# type: (...) -> Dict[str, Union[str, datetime]]
""" Commit the previous appended data.
:param offset: offset is equal to the length of the file after commit the
previous appended data.
:param bool retain_uncommitted_data: Valid only for flush operations. If
"true", uncommitted data is retained after the flush operation
completes; otherwise, | |
# Copyright (c) 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the GlusterFS driver module."""
import errno
import os
import tempfile
import mox as mox_lib
from mox import IgnoreArg
from mox import IsA
from mox import stubout
from cinder import context
from cinder import db
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import imageutils
from cinder.openstack.common import processutils as putils
from cinder import test
from cinder import units
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers import glusterfs
class DumbVolume(object):
    """Minimal dict-like stand-in for a volume DB row used in the tests.

    Supports item access (``vol['name']``) like the real model objects.
    """

    def __init__(self):
        # Per-instance storage.  The previous class-level ``fields = {}``
        # was shared by every DumbVolume instance, so writes on one volume
        # leaked into all others (e.g. two volumes built via _simple_volume
        # aliased the same dict).
        self.fields = {}

    def __setitem__(self, key, value):
        self.fields[key] = value

    def __getitem__(self, item):
        return self.fields[item]
class GlusterFsDriverTestCase(test.TestCase):
"""Test case for GlusterFS driver."""
TEST_EXPORT1 = 'glusterfs-host1:/export'
TEST_EXPORT2 = 'glusterfs-host2:/export'
TEST_EXPORT2_OPTIONS = '-o backupvolfile-server=glusterfs-backup1'
TEST_SIZE_IN_GB = 1
TEST_MNT_POINT = '/mnt/glusterfs'
TEST_MNT_POINT_BASE = '/mnt/test'
TEST_LOCAL_PATH = '/mnt/glusterfs/volume-123'
TEST_FILE_NAME = 'test.txt'
TEST_SHARES_CONFIG_FILE = '/etc/cinder/test-shares.conf'
VOLUME_UUID = 'abcdefab-cdef-abcd-efab-cdefabcdefab'
SNAP_UUID = 'bacadaca-baca-daca-baca-dacadacadaca'
SNAP_UUID_2 = 'bebedede-bebe-dede-bebe-dedebebedede'
    def setUp(self):
        """Create a GlusterfsDriver wired to a mocked Configuration."""
        super(GlusterFsDriverTestCase, self).setUp()
        self._mox = mox_lib.Mox()
        # Mocked Configuration so no real config files are consulted.
        self._configuration = mox_lib.MockObject(conf.Configuration)
        self._configuration.append_config_values(mox_lib.IgnoreArg())
        self._configuration.glusterfs_shares_config = \
            self.TEST_SHARES_CONFIG_FILE
        self._configuration.glusterfs_mount_point_base = \
            self.TEST_MNT_POINT_BASE
        self._configuration.glusterfs_disk_util = 'df'
        self._configuration.glusterfs_sparsed_volumes = True
        self._configuration.glusterfs_qcow2_volumes = False
        self.stubs = stubout.StubOutForTesting()
        self._driver =\
            glusterfs.GlusterfsDriver(configuration=self._configuration)
        self._driver.shares = {}

    def tearDown(self):
        """Undo every mock and stub before base-class cleanup."""
        self._mox.UnsetStubs()
        self.stubs.UnsetAll()
        super(GlusterFsDriverTestCase, self).tearDown()

    def stub_out_not_replaying(self, obj, attr_name):
        """Replace obj.attr_name with a mock that is never replayed.

        Calls on the stub are therefore recorded but never verified.
        """
        attr_to_replace = getattr(obj, attr_name)
        stub = mox_lib.MockObject(attr_to_replace)
        self.stubs.Set(obj, attr_name, stub)
def test_local_path(self):
"""local_path common use case."""
glusterfs.CONF.glusterfs_mount_point_base = self.TEST_MNT_POINT_BASE
drv = self._driver
volume = DumbVolume()
volume['provider_location'] = self.TEST_EXPORT1
volume['name'] = 'volume-123'
self.assertEqual(
'/mnt/test/ab03ab34eaca46a5fb81878f7e9b91fc/volume-123',
drv.local_path(volume))
    def test_mount_glusterfs_should_mount_correctly(self):
        """_mount_glusterfs common case usage."""
        mox = self._mox
        drv = self._driver
        mox.StubOutWithMock(drv, '_execute')
        # Record phase: mkdir -p must precede the mount command.
        drv._execute('mkdir', '-p', self.TEST_MNT_POINT)
        drv._execute('mount', '-t', 'glusterfs', self.TEST_EXPORT1,
                     self.TEST_MNT_POINT, run_as_root=True)
        mox.ReplayAll()
        drv._mount_glusterfs(self.TEST_EXPORT1, self.TEST_MNT_POINT)
        mox.VerifyAll()

    def test_mount_glusterfs_should_suppress_already_mounted_error(self):
        """_mount_glusterfs should suppress already mounted error if
        ensure=True
        """
        mox = self._mox
        drv = self._driver
        mox.StubOutWithMock(drv, '_execute')
        drv._execute('mkdir', '-p', self.TEST_MNT_POINT)
        # mount fails with "already mounted"; ensure=True must swallow it.
        drv._execute('mount', '-t', 'glusterfs', self.TEST_EXPORT1,
                     self.TEST_MNT_POINT, run_as_root=True).\
            AndRaise(putils.ProcessExecutionError(
                stderr='is busy or already mounted'))
        mox.ReplayAll()
        drv._mount_glusterfs(self.TEST_EXPORT1, self.TEST_MNT_POINT,
                             ensure=True)
        mox.VerifyAll()

    def test_mount_glusterfs_should_reraise_already_mounted_error(self):
        """_mount_glusterfs should not suppress already mounted error
        if ensure=False
        """
        mox = self._mox
        drv = self._driver
        mox.StubOutWithMock(drv, '_execute')
        drv._execute('mkdir', '-p', self.TEST_MNT_POINT)
        drv._execute(
            'mount',
            '-t',
            'glusterfs',
            self.TEST_EXPORT1,
            self.TEST_MNT_POINT,
            run_as_root=True). \
            AndRaise(putils.ProcessExecutionError(stderr='is busy or '
                                                         'already mounted'))
        mox.ReplayAll()
        # With ensure=False the ProcessExecutionError must propagate.
        self.assertRaises(putils.ProcessExecutionError, drv._mount_glusterfs,
                          self.TEST_EXPORT1, self.TEST_MNT_POINT,
                          ensure=False)
        mox.VerifyAll()

    def test_mount_glusterfs_should_create_mountpoint_if_not_yet(self):
        """_mount_glusterfs should create mountpoint if it doesn't exist."""
        mox = self._mox
        drv = self._driver
        mox.StubOutWithMock(drv, '_execute')
        # Only the mkdir call is pinned; the mount call's args are ignored.
        drv._execute('mkdir', '-p', self.TEST_MNT_POINT)
        drv._execute(*([IgnoreArg()] * 5), run_as_root=IgnoreArg())
        mox.ReplayAll()
        drv._mount_glusterfs(self.TEST_EXPORT1, self.TEST_MNT_POINT)
        mox.VerifyAll()
def test_get_hash_str(self):
"""_get_hash_str should calculation correct value."""
drv = self._driver
self.assertEqual('ab03ab34eaca46a5fb81878f7e9b91fc',
drv._get_hash_str(self.TEST_EXPORT1))
def test_get_mount_point_for_share(self):
"""_get_mount_point_for_share should calculate correct value."""
drv = self._driver
glusterfs.CONF.glusterfs_mount_point_base = self.TEST_MNT_POINT_BASE
self.assertEqual('/mnt/test/ab03ab34eaca46a5fb81878f7e9b91fc',
drv._get_mount_point_for_share(
self.TEST_EXPORT1))
    def test_get_available_capacity_with_df(self):
        """_get_available_capacity should calculate correct value."""
        mox = self._mox
        drv = self._driver
        # Canned `df --portability --block-size 1` output for the share.
        df_total_size = 2620544
        df_avail = 1490560
        df_head = 'Filesystem 1K-blocks Used Available Use% Mounted on\n'
        df_data = 'glusterfs-host:/export %d 996864 %d 41%% /mnt' % \
                  (df_total_size, df_avail)
        df_output = df_head + df_data
        setattr(glusterfs.CONF, 'glusterfs_disk_util', 'df')
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        drv._get_mount_point_for_share(self.TEST_EXPORT1).\
            AndReturn(self.TEST_MNT_POINT)
        mox.StubOutWithMock(drv, '_execute')
        drv._execute('df', '--portability', '--block-size', '1',
                     self.TEST_MNT_POINT,
                     run_as_root=True).AndReturn((df_output, None))
        mox.ReplayAll()
        self.assertEqual((df_avail, df_total_size),
                         drv._get_available_capacity(self.TEST_EXPORT1))
        mox.VerifyAll()
        # Remove the option injected via setattr above.
        delattr(glusterfs.CONF, 'glusterfs_disk_util')

    def test_load_shares_config(self):
        """_load_shares_config keeps valid shares, skipping comments,
        malformed and empty lines."""
        mox = self._mox
        drv = self._driver
        drv.configuration.glusterfs_shares_config = (
            self.TEST_SHARES_CONFIG_FILE)
        mox.StubOutWithMock(drv, '_read_config_file')
        config_data = []
        config_data.append(self.TEST_EXPORT1)
        # Commented-out line: must be ignored.
        config_data.append('#' + self.TEST_EXPORT2)
        # Share plus mount options: both must be preserved.
        config_data.append(self.TEST_EXPORT2 + ' ' + self.TEST_EXPORT2_OPTIONS)
        config_data.append('broken:share_format')
        config_data.append('')
        drv._read_config_file(self.TEST_SHARES_CONFIG_FILE).\
            AndReturn(config_data)
        mox.ReplayAll()
        drv._load_shares_config(drv.configuration.glusterfs_shares_config)
        self.assertIn(self.TEST_EXPORT1, drv.shares)
        self.assertIn(self.TEST_EXPORT2, drv.shares)
        self.assertEqual(len(drv.shares), 2)
        self.assertEqual(drv.shares[self.TEST_EXPORT2],
                         self.TEST_EXPORT2_OPTIONS)
        mox.VerifyAll()
    def test_ensure_share_mounted(self):
        """_ensure_share_mounted simple use case."""
        mox = self._mox
        drv = self._driver
        mox.StubOutWithMock(utils, 'get_file_mode')
        mox.StubOutWithMock(utils, 'get_file_gid')
        mox.StubOutWithMock(drv, '_execute')
        mox.StubOutWithMock(drv, '_ensure_share_writable')
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        drv._get_mount_point_for_share(self.TEST_EXPORT1).\
            AndReturn(self.TEST_MNT_POINT)
        mox.StubOutWithMock(drv, '_mount_glusterfs')
        drv._mount_glusterfs(self.TEST_EXPORT1, self.TEST_MNT_POINT,
                             ensure=True)
        # After mounting, group and mode of the mount point are inspected
        # and the group is corrected via chgrp.
        utils.get_file_gid(self.TEST_MNT_POINT).AndReturn(333333)
        utils.get_file_mode(self.TEST_MNT_POINT).AndReturn(0o777)
        drv._ensure_share_writable(self.TEST_MNT_POINT)
        drv._execute('chgrp', IgnoreArg(), self.TEST_MNT_POINT,
                     run_as_root=True)
        mox.ReplayAll()
        drv._ensure_share_mounted(self.TEST_EXPORT1)
        mox.VerifyAll()

    def test_ensure_shares_mounted_should_save_mounting_successfully(self):
        """_ensure_shares_mounted should save share if mounted with success."""
        mox = self._mox
        drv = self._driver
        mox.StubOutWithMock(drv, '_read_config_file')
        config_data = []
        config_data.append(self.TEST_EXPORT1)
        drv._read_config_file(self.TEST_SHARES_CONFIG_FILE).\
            AndReturn(config_data)
        mox.StubOutWithMock(drv, '_ensure_share_mounted')
        drv._ensure_share_mounted(self.TEST_EXPORT1)
        mox.ReplayAll()
        drv._ensure_shares_mounted()
        # The successfully mounted share must be recorded.
        self.assertEqual(1, len(drv._mounted_shares))
        self.assertEqual(self.TEST_EXPORT1, drv._mounted_shares[0])
        mox.VerifyAll()

    def test_ensure_shares_mounted_should_not_save_mounting_with_error(self):
        """_ensure_shares_mounted should not save share if failed to mount."""
        mox = self._mox
        drv = self._driver
        mox.StubOutWithMock(drv, '_read_config_file')
        config_data = []
        config_data.append(self.TEST_EXPORT1)
        drv._read_config_file(self.TEST_SHARES_CONFIG_FILE).\
            AndReturn(config_data)
        mox.StubOutWithMock(drv, '_ensure_share_mounted')
        # Mount failure must be swallowed and the share left unrecorded.
        drv._ensure_share_mounted(self.TEST_EXPORT1).AndRaise(Exception())
        mox.ReplayAll()
        drv._ensure_shares_mounted()
        self.assertEqual(0, len(drv._mounted_shares))
        mox.VerifyAll()
    def test_setup_should_throw_error_if_shares_config_not_configured(self):
        """do_setup should throw error if shares config is not configured."""
        drv = self._driver
        glusterfs.CONF.glusterfs_shares_config = self.TEST_SHARES_CONFIG_FILE
        # The shares config file does not exist on disk, so do_setup fails.
        self.assertRaises(exception.GlusterfsException,
                          drv.do_setup, IsA(context.RequestContext))

    def test_setup_should_throw_exception_if_client_is_not_installed(self):
        """do_setup should throw exception if client is not installed."""
        mox = self._mox
        drv = self._driver
        glusterfs.CONF.glusterfs_shares_config = self.TEST_SHARES_CONFIG_FILE
        mox.StubOutWithMock(os.path, 'exists')
        os.path.exists(self.TEST_SHARES_CONFIG_FILE).AndReturn(True)
        mox.StubOutWithMock(drv, '_execute')
        # Simulate a missing mount.glusterfs helper binary.
        drv._execute('mount.glusterfs', check_exit_code=False).\
            AndRaise(OSError(errno.ENOENT, 'No such file or directory'))
        mox.ReplayAll()
        self.assertRaises(exception.GlusterfsException,
                          drv.do_setup, IsA(context.RequestContext))
        mox.VerifyAll()

    def _fake_load_shares_config(self, conf):
        # Stand-in for _load_shares_config: a single share with no options.
        self._driver.shares = {'127.7.7.7:/gluster1': None}

    def _fake_NamedTemporaryFile(self, prefix=None, dir=None):
        # Simulates a share the current user cannot write to.
        raise OSError('Permission denied!')

    def test_setup_set_share_permissions(self):
        """do_setup fixes up group and mode of a non-writable share."""
        mox = self._mox
        drv = self._driver
        glusterfs.CONF.glusterfs_shares_config = self.TEST_SHARES_CONFIG_FILE
        self.stubs.Set(drv, '_load_shares_config',
                       self._fake_load_shares_config)
        self.stubs.Set(tempfile, 'NamedTemporaryFile',
                       self._fake_NamedTemporaryFile)
        mox.StubOutWithMock(os.path, 'exists')
        mox.StubOutWithMock(drv, '_execute')
        mox.StubOutWithMock(utils, 'get_file_gid')
        mox.StubOutWithMock(utils, 'get_file_mode')
        mox.StubOutWithMock(os, 'getegid')
        drv._execute('mount.glusterfs', check_exit_code=False)
        drv._execute('mkdir', '-p', mox_lib.IgnoreArg())
        os.path.exists(self.TEST_SHARES_CONFIG_FILE).AndReturn(True)
        drv._execute('mount', '-t', 'glusterfs', '127.7.7.7:/gluster1',
                     mox_lib.IgnoreArg(), run_as_root=True)
        utils.get_file_gid(mox_lib.IgnoreArg()).AndReturn(33333)
        # perms not writable
        utils.get_file_mode(mox_lib.IgnoreArg()).AndReturn(0o000)
        os.getegid().AndReturn(888)
        # Expect group fix-up followed by group-write permission fix-up.
        drv._execute('chgrp', 888, mox_lib.IgnoreArg(), run_as_root=True)
        drv._execute('chmod', 'g+w', mox_lib.IgnoreArg(), run_as_root=True)
        mox.ReplayAll()
        drv.do_setup(IsA(context.RequestContext))
        mox.VerifyAll()
    def test_find_share_should_throw_error_if_there_is_no_mounted_shares(self):
        """_find_share should throw error if there is no mounted shares."""
        drv = self._driver
        drv._mounted_shares = []
        self.assertRaises(exception.GlusterfsNoSharesMounted,
                          drv._find_share,
                          self.TEST_SIZE_IN_GB)

    def test_find_share(self):
        """_find_share simple use case."""
        mox = self._mox
        drv = self._driver
        drv._mounted_shares = [self.TEST_EXPORT1, self.TEST_EXPORT2]
        mox.StubOutWithMock(drv, '_get_available_capacity')
        drv._get_available_capacity(self.TEST_EXPORT1).\
            AndReturn((2 * units.GiB, 5 * units.GiB))
        drv._get_available_capacity(self.TEST_EXPORT2).\
            AndReturn((3 * units.GiB, 10 * units.GiB))
        mox.ReplayAll()
        # EXPORT2 reports the most free space, so it must be chosen.
        self.assertEqual(self.TEST_EXPORT2,
                         drv._find_share(self.TEST_SIZE_IN_GB))
        mox.VerifyAll()

    def test_find_share_should_throw_error_if_there_is_no_enough_place(self):
        """_find_share should throw error if there is no share to host vol."""
        mox = self._mox
        drv = self._driver
        drv._mounted_shares = [self.TEST_EXPORT1,
                               self.TEST_EXPORT2]
        mox.StubOutWithMock(drv, '_get_available_capacity')
        # Neither share has any free capacity.
        drv._get_available_capacity(self.TEST_EXPORT1).\
            AndReturn((0, 5 * units.GiB))
        drv._get_available_capacity(self.TEST_EXPORT2).\
            AndReturn((0, 10 * units.GiB))
        mox.ReplayAll()
        self.assertRaises(exception.GlusterfsNoSuitableShareFound,
                          drv._find_share,
                          self.TEST_SIZE_IN_GB)
        mox.VerifyAll()
def _simple_volume(self, id=None):
volume = DumbVolume()
volume['provider_location'] = self.TEST_EXPORT1
if id is None:
volume['id'] = self.VOLUME_UUID
else:
volume['id'] = id
# volume['name'] mirrors format from db/sqlalchemy/models.py
volume['name'] = 'volume-%s' % volume['id']
volume['size'] = 10
volume['status'] = 'available'
return volume
    def test_create_sparsed_volume(self):
        """_do_create_volume builds a sparse file when sparsed is enabled."""
        mox = self._mox
        drv = self._driver
        volume = self._simple_volume()
        setattr(glusterfs.CONF, 'glusterfs_sparsed_volumes', True)
        mox.StubOutWithMock(drv, '_create_sparsed_file')
        mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
        drv._create_sparsed_file(IgnoreArg(), IgnoreArg())
        drv._set_rw_permissions_for_all(IgnoreArg())
        mox.ReplayAll()
        drv._do_create_volume(volume)
        mox.VerifyAll()
        # Remove the option injected via setattr above.
        delattr(glusterfs.CONF, 'glusterfs_sparsed_volumes')

    def test_create_nonsparsed_volume(self):
        """_do_create_volume builds a regular file when sparsed is off."""
        mox = self._mox
        drv = self._driver
        volume = self._simple_volume()
        old_value = self._configuration.glusterfs_sparsed_volumes
        self._configuration.glusterfs_sparsed_volumes = False
        mox.StubOutWithMock(drv, '_create_regular_file')
        mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
        drv._create_regular_file(IgnoreArg(), IgnoreArg())
        drv._set_rw_permissions_for_all(IgnoreArg())
        mox.ReplayAll()
        drv._do_create_volume(volume)
        mox.VerifyAll()
        # Restore the original configuration value.
        self._configuration.glusterfs_sparsed_volumes = old_value

    def test_create_qcow2_volume(self):
        """_do_create_volume uses qemu-img when qcow2 volumes are enabled."""
        (mox, drv) = self._mox, self._driver
        volume = self._simple_volume()
        old_value = self._configuration.glusterfs_qcow2_volumes
        self._configuration.glusterfs_qcow2_volumes = True
        mox.StubOutWithMock(drv, '_execute')
        hashed = drv._get_hash_str(volume['provider_location'])
        path = '%s/%s/volume-%s' % (self.TEST_MNT_POINT_BASE,
                                    hashed,
                                    self.VOLUME_UUID)
        # Expect a metadata-preallocated qcow2 image followed by a chmod.
        drv._execute('qemu-img', 'create', '-f', 'qcow2',
                     '-o', 'preallocation=metadata', path,
                     str(volume['size'] * units.GiB),
                     run_as_root=True)
        drv._execute('chmod', 'ugo+rw', path, run_as_root=True)
        mox.ReplayAll()
        drv._do_create_volume(volume)
        mox.VerifyAll()
        self._configuration.glusterfs_qcow2_volumes = old_value

    def test_create_volume_should_ensure_glusterfs_mounted(self):
        """create_volume ensures shares provided in config are mounted."""
        mox = self._mox
        drv = self._driver
        self.stub_out_not_replaying(glusterfs, 'LOG')
        self.stub_out_not_replaying(drv, '_find_share')
        self.stub_out_not_replaying(drv, '_do_create_volume')
        mox.StubOutWithMock(drv, '_ensure_shares_mounted')
        drv._ensure_shares_mounted()
        mox.ReplayAll()
        volume = DumbVolume()
        volume['size'] = self.TEST_SIZE_IN_GB
        drv.create_volume(volume)
        mox.VerifyAll()

    def test_create_volume_should_return_provider_location(self):
        """create_volume should return provider_location with found share."""
        mox = self._mox
        drv = self._driver
        self.stub_out_not_replaying(glusterfs, 'LOG')
        self.stub_out_not_replaying(drv, '_ensure_shares_mounted')
        self.stub_out_not_replaying(drv, '_do_create_volume')
        mox.StubOutWithMock(drv, '_find_share')
        drv._find_share(self.TEST_SIZE_IN_GB).AndReturn(self.TEST_EXPORT1)
        mox.ReplayAll()
        volume = DumbVolume()
        volume['size'] = self.TEST_SIZE_IN_GB
        result = drv.create_volume(volume)
        self.assertEqual(self.TEST_EXPORT1, result['provider_location'])
        mox.VerifyAll()
def test_create_cloned_volume(self):
(mox, drv) = self._mox, self._driver
mox.StubOutWithMock(drv, '_create_snapshot')
mox.StubOutWithMock(drv, '_delete_snapshot')
mox.StubOutWithMock(drv, '_read_info_file')
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_copy_volume_from_snapshot')
volume_file = 'volume-%s' % self.VOLUME_UUID
volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE,
drv._get_hash_str(self.TEST_EXPORT1),
volume_file)
volume = self._simple_volume()
src_vref = self._simple_volume()
src_vref['id'] = '375e32b2-804a-49f2-b282-85d1d5a5b9e1'
src_vref['name'] = 'volume-%s' % src_vref['id']
volume_file = 'volume-%s' % src_vref['id']
volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE,
drv._get_hash_str(self.TEST_EXPORT1),
volume_file)
src_info_path = '%s.info' % volume_path
volume_ref = {'id': volume['id'],
'name': volume['name'],
'status': volume['status'],
'provider_location': volume['provider_location'],
'size': volume['size']}
snap_ref = {'volume_name': src_vref['name'],
'name': 'clone-snap-%s' % src_vref['id'],
'size': src_vref['size'],
'volume_size': src_vref['size'],
'volume_id': src_vref['id'],
'id': 'tmp-snap-%s' % src_vref['id'],
'volume': src_vref}
drv._create_snapshot(snap_ref)
snap_info = {'active': volume_file,
snap_ref['id']: volume_path + '-clone'}
drv._read_info_file(src_info_path).AndReturn(snap_info)
drv._copy_volume_from_snapshot(snap_ref, volume_ref, volume['size'])
drv._delete_snapshot(mox_lib.IgnoreArg())
mox.ReplayAll()
drv.create_cloned_volume(volume, src_vref)
    def test_delete_volume(self):
        """delete_volume simple test case."""
        mox = self._mox
        drv = self._driver
        self.stub_out_not_replaying(drv, '_ensure_share_mounted')
        volume = DumbVolume()
        volume['name'] = 'volume-123'
        volume['provider_location'] = self.TEST_EXPORT1
        mox.StubOutWithMock(drv, 'local_path')
        drv.local_path(volume).AndReturn(self.TEST_LOCAL_PATH)
        mox.StubOutWithMock(drv, '_execute')
        # Deleting a volume is an rm -f of its backing file.
        drv._execute('rm', '-f', self.TEST_LOCAL_PATH, run_as_root=True)
        mox.ReplayAll()
        drv.delete_volume(volume)
        mox.VerifyAll()
def test_delete_should_ensure_share_mounted(self):
"""delete_volume should ensure that corresponding share is mounted."""
mox = self._mox
drv = self._driver
self.stub_out_not_replaying(drv, '_execute')
volume = DumbVolume()
volume['name'] = 'volume-123'
| |
<gh_stars>0
#!/usr/bin/env python
"""A data store server."""
from BaseHTTPServer import BaseHTTPRequestHandler
from BaseHTTPServer import HTTPServer
import socket
import SocketServer
import time
import urlparse
import uuid
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top
try:
import urllib3
from urllib3 import connectionpool
except ImportError:
# Urllib3 also comes as part of requests, try to fallback.
from requests.packages import urllib3
from requests.packages.urllib3 import connectionpool
from grr.lib import server_plugins
# pylint: enable=unused-import,g-bad-import-order,g-import-not-at-top
import logging
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import flags
from grr.lib import registry
from grr.lib import startup
from grr.lib import stats
from grr.lib import utils
from grr.lib.rdfvalues import data_server as rdf_data_server
from grr.lib.rdfvalues import data_store as rdf_data_store
from grr.server.data_server import auth
from grr.server.data_server import constants
from grr.server.data_server import errors
from grr.server.data_server import master
from grr.server.data_server import rebalance
from grr.server.data_server import store
from grr.server.data_server import utils as sutils
# Command-line flags for the data server entry point.
flags.DEFINE_integer("port", None, "Specify the data server port.")
flags.DEFINE_string("path", None, "Specify the data store path.")
flags.DEFINE_bool("master", False, "Mark this data server as the master.")
class DataServerHandler(BaseHTTPRequestHandler, object):
"""Handler for HTTP requests to the data server."""
# Data store service.
SERVICE = None
# MASTER is set if this data server is also running as master.
MASTER = None
# Set if the server is not the master.
DATA_SERVER = None
# Mapping information sent/created by the master.
MAPPING = None
CMDTABLE = None
# Nonce store used for authentication.
NONCE_STORE = None
  @classmethod
  def InitMasterServer(cls, port):
    """Initializes this process as the master data server."""
    cls.MASTER = master.DataMaster(port, cls.SERVICE)
    cls.MAPPING = cls.MASTER.LoadMapping()
    # Master is the only data server that knows about the client credentials.
    # The credentials will be sent to other data servers once they login.
    creds = auth.ClientCredentials()
    creds.InitializeFromConfig()
    cls.NONCE_STORE.SetClientCredentials(creds)
    logging.info("Starting Data Master/Server on port %d ...", port)

  @classmethod
  def InitDataServer(cls, port):
    """Initiates regular data server."""
    cls.DATA_SERVER = StandardDataServer(port, cls)
    # Connect to master server.
    cls.DATA_SERVER.Register()
    cls.MAPPING = cls.DATA_SERVER.LoadMapping()
    # Periodically report state/statistics back to the master.
    cls.DATA_SERVER.PeriodicallySendStatistics()
    logging.info("Starting Data Server on port %d ...", port)
  @classmethod
  def InitHandlerTables(cls):
    """Initializes tables of handler callbacks."""
    # Maps URL path -> handler for ordinary (fully buffered) requests.
    cls.HTTP_TABLE = {
        "/manage": cls.HandleManager,
        "/server/handshake": cls.HandleServerHandshake,
        "/server/register": cls.HandleRegister,
        "/server/state": cls.HandleState,
        "/server/mapping": cls.HandleMapping,
        "/client/start": cls.HandleDataStoreService,
        "/client/handshake": cls.HandleClientHandshake,
        "/client/mapping": cls.HandleMapping,
        "/rebalance/phase1": cls.HandleRebalancePhase1,
        "/rebalance/phase2": cls.HandleRebalancePhase2,
        "/rebalance/statistics": cls.HandleRebalanceStatistics,
        "/rebalance/copy": cls.HandleRebalanceCopy,
        "/rebalance/commit": cls.HandleRebalanceCommit,
        "/rebalance/perform": cls.HandleRebalancePerform,
        "/rebalance/recover": cls.HandleRebalanceRecover,
        "/servers/add/check": cls.HandleServerAddCheck,
        "/servers/add": cls.HandleServerAdd,
        "/servers/rem/check": cls.HandleServerRemCheck,
        "/servers/rem": cls.HandleServerRem,
        "/servers/sync": cls.HandleServerSync,
        "/servers/sync-all": cls.HandleServerSyncAll
    }
    # Handlers that consume the request body as a stream (self.rfile).
    cls.STREAMING_TABLE = {
        "/rebalance/copy-file": cls.HandleRebalanceCopyFile,
    }

  @classmethod
  def GetStatistics(cls):
    """Build statistics object for the server."""
    ok = rdf_data_server.DataServerState.Status.AVAILABLE
    num_components, avg_component = cls.SERVICE.GetComponentInformation()
    # Load is not currently measured; always reported as 0.
    stat = rdf_data_server.DataServerState(size=cls.SERVICE.Size(),
                                           load=0,
                                           status=ok,
                                           num_components=num_components,
                                           avg_component=avg_component)
    return stat
  protocol_version = "HTTP/1.1"

  # Timeouts, in seconds, for the long-lived client data-store connection.
  CLIENT_TIMEOUT_TIME = 600
  SEND_TIMEOUT = 5
  READ_TIMEOUT = 5
  LOGIN_TIMEOUT = 5

  def __init__(self, request, client_address, server):
    # Data server reference for the master.
    self.data_server = None
    # Id of the rebalance operation started by this handler, if any.
    self.rebalance_id = None
    BaseHTTPRequestHandler.__init__(self, request, client_address, server)

  def _Response(self, code, body):
    """Write a raw HTTP response with the given status code and body.

    NOTE(review): the status line is assembled by hand, always uses the
    reason phrase "OK" and separates lines with "\\n" instead of the CRLF
    required by the HTTP spec; the in-project urllib3-based clients accept
    this, but confirm before exposing to other clients.
    """
    reply = ("HTTP/1.1 " + str(code) + " OK\n"
             "Content-Length: " + str(len(body)) + "\n"
             "\n" + body)
    self.wfile.write(reply)
    return

  def _EmptyResponse(self, code):
    # Convenience wrapper: status code with an empty body.
    return self._Response(code, "")
def _ReadExactly(self, sock, n):
ret = ""
left = n
while left:
ret += sock.recv(left)
left = n - len(ret)
return ret
def _ReadExactlyFailAfterFirst(self, sock, n):
"""Read from socket but return nothing if we get an exception."""
ret = ""
left = n
while left:
try:
ret += sock.recv(left)
if not ret:
return ""
left = n - len(ret)
except socket.timeout:
if left < n:
# We have already read some data, so we just give up.
return ""
except socket.error:
return ""
return ret
  def HandleClient(self, sock, permissions):
    """Handles new client requests readable from 'read'.

    Reads one length-prefixed DataStoreCommand from the socket, dispatches
    it through CMDTABLE if `permissions` allows the operation, and returns
    the length-prefixed serialized response ("" on any read error).
    """
    # Use a long timeout here.
    sock.settimeout(self.CLIENT_TIMEOUT_TIME)
    cmdlen_str = self._ReadExactlyFailAfterFirst(sock, sutils.SIZE_PACKER.size)
    if not cmdlen_str:
      return ""
    cmdlen = sutils.SIZE_PACKER.unpack(cmdlen_str)[0]
    # Full request must be here.
    sock.settimeout(self.READ_TIMEOUT)
    try:
      cmd_str = self._ReadExactly(sock, cmdlen)
    except (socket.timeout, socket.error):
      return ""
    cmd = rdf_data_server.DataStoreCommand(cmd_str)
    request = cmd.request
    op = cmd.command
    cmdinfo = self.CMDTABLE.get(op)
    if not cmdinfo:
      logging.error("Unrecognized command %d", op)
      return ""
    # Each table entry is (handler method, required permission).
    method, perm = cmdinfo
    if perm in permissions:
      response = method(request)
    else:
      # Client lacks the permission for this operation; reply with an
      # AUTHORIZATION_DENIED response instead of executing it.
      status_desc = ("Operation not allowed: required %s but only have "
                     "%s permissions" % (perm, permissions))
      resp = rdf_data_store.DataStoreResponse(
          request=cmd.request,
          status_desc=status_desc,
          status=rdf_data_store.DataStoreResponse.Status.AUTHORIZATION_DENIED)
      response = resp.SerializeToString()
    # Responses are length-prefixed like requests.
    return sutils.SIZE_PACKER.pack(len(response)) + response
  def HandleRegister(self):
    """Registers a data server in the master."""
    # Only the master accepts registrations.
    if not self.MASTER:
      self._EmptyResponse(constants.RESPONSE_NOT_MASTER_SERVER)
      return
    request = rdf_data_server.DataStoreRegistrationRequest(self.post_data)
    port = request.port
    addr = self.client_address[0]
    token = request.token
    # The registering server must present a valid auth token.
    if not self.NONCE_STORE.ValidateAuthTokenServer(token):
      self._EmptyResponse(constants.RESPONSE_SERVER_NOT_AUTHORIZED)
      return
    newserver = self.MASTER.RegisterServer(addr, port)
    if newserver:
      self.data_server = newserver
      index = newserver.Index()
      body = sutils.SIZE_PACKER.pack(index)
      # Need to send back the encrypted client credentials.
      body += self.NONCE_STORE.EncryptClientCredentials()
      self._Response(constants.RESPONSE_OK, body)
    else:
      # Could not register the Data Server.
      logging.warning("Could not register server %s:%d. Maybe not allowed?",
                      addr, port)
      self._EmptyResponse(constants.RESPONSE_SERVER_NOT_ALLOWED)

  def HandleState(self):
    """Respond to /server/state."""
    if not self.MASTER:
      self._EmptyResponse(constants.RESPONSE_NOT_MASTER_SERVER)
      return
    # State updates are only accepted from servers that registered first.
    if not self.data_server:
      logging.error("Server %s attempting to update its state but "
                    "is not registered yet", self.client_address)
      self._EmptyResponse(constants.RESPONSE_SERVER_NOT_REGISTERED)
      return
    state = rdf_data_server.DataServerState(self.post_data)
    self.data_server.UpdateState(state)
    logging.info("Received new state from server %s", self.client_address)
    # Response with our mapping.
    body = self.MAPPING.SerializeToString()
    self._Response(constants.RESPONSE_OK, body)
  def HandleHandshake(self):
    """Return a nonce to either a server or client."""
    nonce = self.NONCE_STORE.NewNonce()
    if not nonce:
      # The nonce store is exhausted; fail loudly.
      raise errors.DataServerError("Could not generate new nonces! Too many "
                                   "requests and/or clients.")
    self._Response(constants.RESPONSE_OK, nonce)

  def HandleClientHandshake(self):
    """Starts the handshake with data server clients."""
    self.HandleHandshake()

  def HandleServerHandshake(self):
    """Starts the handshake with data servers."""
    # Only the master hands out nonces to other data servers.
    if not self.MASTER:
      self._EmptyResponse(constants.RESPONSE_NOT_MASTER_SERVER)
      return
    self.HandleHandshake()
  def HandleDataStoreService(self):
    """Initiate a conversation for handling data store commands."""
    if self.data_server:
      # If the data server is connected, this does not make sense.
      self._EmptyResponse(constants.RESPONSE_NOT_A_CLIENT)
      return
    # We never return anything for this request.
    # Simply use the socket and serve database requests.
    sock = self.connection
    sock.setblocking(1)
    # But first we need to validate the client by reading the token.
    token = rdf_data_server.DataStoreAuthToken(self.post_data)
    perms = self.NONCE_STORE.ValidateAuthTokenClient(token)
    if not perms:
      # Invalid token: tell the client ("IP") and drop the connection.
      sock.sendall("IP\n")
      sock.close()
      self.close_connection = 1
      return
    logging.info("Client %s has started using the data server",
                 self.client_address)
    try:
      # Send handshake.
      sock.settimeout(self.LOGIN_TIMEOUT)  # LOGIN_TIMEOUT seconds to login.
      sock.sendall("OK\n")
    except (socket.error, socket.timeout):
      logging.warning("Could not login client %s", self.client_address)
      self.close_connection = 1
      return
    while True:
      # Handle requests
      replybody = self.HandleClient(sock, perms)
      if not replybody:
        # Client probably died or there was an error in the connection.
        # Force the client to reconnect and send the command again.
        sock.close()
        self.close_connection = 1
        return
      try:
        sock.settimeout(self.SEND_TIMEOUT)  # SEND_TIMEOUT seconds to send.
        sock.sendall(replybody)
      except (socket.error, socket.timeout):
        # At this point, there is no way to know how much data was actually
        # sent. Therefore, we close the connection and force the client to
        # reconnect. When the client gets an error, he should assume that
        # the command was not successful
        sock.close()
        self.close_connection = 1
        return
  def HandleMapping(self):
    """Returns the mapping to a client or server."""
    if not self.MAPPING:
      # No mapping available yet (e.g. not registered with the master).
      self._EmptyResponse(constants.RESPONSE_MAPPING_NOT_FOUND)
      return
    body = self.MAPPING.SerializeToString()
    self._Response(constants.RESPONSE_OK, body)

  def HandleManager(self):
    """Returns the current mapping to the management tool (master only)."""
    if not self.MASTER:
      self._EmptyResponse(constants.RESPONSE_NOT_MASTER_SERVER)
      return
    # Response with our mapping.
    body = self.MAPPING.SerializeToString()
    self._Response(constants.RESPONSE_OK, body)
  def HandleRebalancePhase1(self):
    """Call master to perform phase 1 of the rebalancing operation."""
    if not self.MASTER:
      self._EmptyResponse(constants.RESPONSE_NOT_MASTER_SERVER)
      return
    # Only one rebalance operation may run at a time.
    if self.MASTER.IsRebalancing():
      self._EmptyResponse(constants.RESPONSE_MASTER_IS_REBALANCING)
      return
    new_mapping = rdf_data_server.DataServerMapping(self.post_data)
    # A fresh transaction id identifies this rebalance operation.
    rebalance_id = str(uuid.uuid4())
    reb = rdf_data_server.DataServerRebalance(id=rebalance_id,
                                              mapping=new_mapping)
    if not self.MASTER.SetRebalancing(reb):
      logging.warning("Could not contact servers for rebalancing")
      self._EmptyResponse(constants.RESPONSE_DATA_SERVERS_UNREACHABLE)
      return
    if not self.MASTER.FetchRebalanceInformation():
      logging.warning("Could not contact servers for rebalancing statistics")
      self._EmptyResponse(constants.RESPONSE_DATA_SERVERS_UNREACHABLE)
      return
    self.rebalance_id = rebalance_id
    body = reb.SerializeToString()
    self._Response(constants.RESPONSE_OK, body)

  def HandleRebalanceStatistics(self):
    """Call data server to count how much data needs to move in rebalancing."""
    reb = rdf_data_server.DataServerRebalance(self.post_data)
    mapping = reb.mapping
    # The master is always server index 0.
    index = 0
    if not self.MASTER:
      index = self.DATA_SERVER.Index()
    moving = rebalance.ComputeRebalanceSize(mapping, index)
    reb.moving.Append(moving)
    body = reb.SerializeToString()
    self._Response(constants.RESPONSE_OK, body)

  def HandleRebalanceCopy(self):
    """Copy this server's files for the given rebalance transaction."""
    reb = rdf_data_server.DataServerRebalance(self.post_data)
    index = 0
    if not self.MASTER:
      index = self.DATA_SERVER.Index()
    rebalance.CopyFiles(reb, index)
    self._EmptyResponse(constants.RESPONSE_OK)

  def HandleRebalanceCopyFile(self):
    """Receive one streamed file for a rebalance transaction."""
    # Streaming handler: the file body is read directly from self.rfile.
    if not rebalance.SaveTemporaryFile(self.rfile):
      return self._EmptyResponse(constants.RESPONSE_FILE_NOT_SAVED)
    self._EmptyResponse(constants.RESPONSE_OK)
def HandleRebalancePhase2(self):
"""Call master to perform phase 2 of rebalancing."""
if not self.MASTER:
self._EmptyResponse(constants.RESPONSE_NOT_MASTER_SERVER)
return
reb = rdf_data_server.DataServerRebalance(self.post_data)
current = self.MASTER.IsRebalancing()
if not current or current.id != reb.id:
# Not the same ID.
self._EmptyResponse(constants.RESPONSE_WRONG_TRANSACTION)
return
if not self.MASTER.CopyRebalanceFiles():
self._EmptyResponse(constants.RESPONSE_FILES_NOT_COPIED)
return
self._EmptyResponse(constants.RESPONSE_OK)
def HandleRebalanceCommit(self):
"""Call master to commit rebalance transaction."""
if not self.MASTER:
self._EmptyResponse(constants.RESPONSE_NOT_MASTER_SERVER)
return
reb = rdf_data_server.DataServerRebalance(self.post_data)
current = self.MASTER.IsRebalancing()
if not current or current.id != reb.id:
# Not the same ID.
self._EmptyResponse(constants.RESPONSE_WRONG_TRANSACTION)
return
new_mapping = self.MASTER.RebalanceCommit()
if not new_mapping:
self._EmptyResponse(constants.RESPONSE_NOT_COMMITED)
return
self._Response(constants.RESPONSE_OK, self.MAPPING.SerializeToString())
def HandleRebalancePerform(self):
"""Call data server to perform rebalance transaction."""
reb = rdf_data_server.DataServerRebalance(self.post_data)
if not rebalance.MoveFiles(reb, self.MASTER):
logging.critical("Failed to | |
import sys
sys.path.append('../')
from DeepInterpretablePolynomialNeuralNetwork.src.generate_synthetic_data import boolean_concept_uniform_distribution
from DeepInterpretablePolynomialNeuralNetwork.src.deep_interpretable_polynomial_neural_network import DeepInterpretablePolynomialNeuralNetwork, GrowthPolicy
from DeepInterpretablePolynomialNeuralNetwork.src.evaluation_tools import EvaluationTools
def experiment_all_terms_degree1():
    """In paper: table 2, line."""
    # Data: labels defined by two degree-1 conjunctions over 20 boolean variables.
    X, Y = boolean_concept_uniform_distribution([[1], [3]], 0.5, 1000, 20)
    # Model: enumerate every candidate term up to degree 1 (no growth).
    dipnn = DeepInterpretablePolynomialNeuralNetwork(
        d_max=1, lambda_param=1.0, balance=1.0, fixed_margin=False, ro=1.0,
        derivative_magnitude_th=0.0, coeff_magnitude_th=0.01,
        max_no_terms_per_iteration=20, max_no_terms=200,
        growth_policy=GrowthPolicy.ALL_TERMS)
    # Evaluation: 100 random 80/20 splits; report coefficients above 0.01 at 2 decimals.
    EvaluationTools.evaluate_multiple_times(dipnn, X, Y, 100, 0.2, 0.01, 2)
    print(f'The margin: {dipnn.ro}')
def experiment_all_terms_degree2():
    """In paper: table 2, line."""
    # Data: labels defined by two degree-2 conjunctions over 20 boolean variables.
    X, Y = boolean_concept_uniform_distribution([[1, 2], [3, 4]], 0.75, 1000, 20)
    # Model: enumerate every candidate term up to degree 2 (no growth).
    dipnn = DeepInterpretablePolynomialNeuralNetwork(
        d_max=2, lambda_param=1.0, balance=1.0, fixed_margin=False, ro=1.0,
        derivative_magnitude_th=0.0, coeff_magnitude_th=0.01,
        max_no_terms_per_iteration=20, max_no_terms=200,
        growth_policy=GrowthPolicy.ALL_TERMS)
    # Evaluation: 100 random 80/20 splits; report coefficients above 0.01 at 2 decimals.
    EvaluationTools.evaluate_multiple_times(dipnn, X, Y, 100, 0.2, 0.01, 2)
    print(f'The margin: {dipnn.ro}')
def experiment_all_terms_degree3():
    """In paper: table 2, line."""
    # Data: labels defined by two degree-3 conjunctions over 20 boolean variables.
    X, Y = boolean_concept_uniform_distribution([[1, 2, 10], [3, 4, 8]], 0.75, 1000, 20)
    # Model: enumerate every candidate term up to degree 3 (no growth).
    dipnn = DeepInterpretablePolynomialNeuralNetwork(
        d_max=3, lambda_param=1.0, balance=1.0, fixed_margin=False, ro=1.0,
        derivative_magnitude_th=0.0, coeff_magnitude_th=0.01,
        max_no_terms_per_iteration=20, max_no_terms=200,
        growth_policy=GrowthPolicy.ALL_TERMS)
    # Evaluation: 100 random 80/20 splits; report coefficients above 0.01 at 2 decimals.
    EvaluationTools.evaluate_multiple_times(dipnn, X, Y, 100, 0.2, 0.01, 2)
    print(f'The margin: {dipnn.ro}')
def experiment_growth_degree1():
    """In paper: table 2, line."""
    # Data: labels defined by two degree-1 conjunctions over 20 boolean variables.
    X, Y = boolean_concept_uniform_distribution([[1], [3]], 0.5, 1000, 20)
    # Model: grow terms iteratively up to degree 1.
    dipnn = DeepInterpretablePolynomialNeuralNetwork(
        d_max=1, lambda_param=1.0, balance=1.0, fixed_margin=False, ro=1.0,
        derivative_magnitude_th=0.0, coeff_magnitude_th=0.01,
        max_no_terms_per_iteration=20, max_no_terms=200,
        growth_policy=GrowthPolicy.GROW)
    # Evaluation: 100 random 80/20 splits; report coefficients above 0.01 at 2 decimals.
    EvaluationTools.evaluate_multiple_times(dipnn, X, Y, 100, 0.2, 0.01, 2)
    print(f'The margin: {dipnn.ro}')
def experiment_growth_degree2():
    """In paper: table 2, line."""
    # Data: labels defined by two degree-2 conjunctions over 20 boolean variables.
    X, Y = boolean_concept_uniform_distribution([[1, 2], [3, 4]], 0.75, 1000, 20)
    # Model: grow terms iteratively up to degree 2.
    dipnn = DeepInterpretablePolynomialNeuralNetwork(
        d_max=2, lambda_param=1.0, balance=1.0, fixed_margin=False, ro=1.0,
        derivative_magnitude_th=0.0, coeff_magnitude_th=0.01,
        max_no_terms_per_iteration=20, max_no_terms=200,
        growth_policy=GrowthPolicy.GROW)
    # Evaluation: 100 random 80/20 splits; report coefficients above 0.01 at 2 decimals.
    EvaluationTools.evaluate_multiple_times(dipnn, X, Y, 100, 0.2, 0.01, 2)
    print(f'The margin: {dipnn.ro}')
def experiment_growth_degree3():
    """In paper: table 2, line."""
    # Data: labels defined by two degree-3 conjunctions over 20 boolean variables.
    X, Y = boolean_concept_uniform_distribution([[1, 2, 10], [3, 4, 8]], 0.75, 1000, 20)
    # Model: grow terms iteratively up to degree 3.
    dipnn = DeepInterpretablePolynomialNeuralNetwork(
        d_max=3, lambda_param=1.0, balance=1.0, fixed_margin=False, ro=1.0,
        derivative_magnitude_th=0.0, coeff_magnitude_th=0.01,
        max_no_terms_per_iteration=20, max_no_terms=200,
        growth_policy=GrowthPolicy.GROW)
    # Evaluation: 100 random 80/20 splits; report coefficients above 0.01 at 2 decimals.
    EvaluationTools.evaluate_multiple_times(dipnn, X, Y, 100, 0.2, 0.01, 2)
    print(f'The margin: {dipnn.ro}')
def experiment_growth_degree4():
    """Growth experiment with two overlapping degree-4 conjunctions."""
    # Data: the two conjunctions share variable 1.
    X, Y = boolean_concept_uniform_distribution([[1, 2, 10, 15], [1, 4, 8, 12]], 0.75, 1000, 20)
    # Model: grow terms iteratively up to degree 4.
    dipnn = DeepInterpretablePolynomialNeuralNetwork(
        d_max=4, lambda_param=1.0, balance=1.0, fixed_margin=False, ro=1.0,
        derivative_magnitude_th=0.0, coeff_magnitude_th=0.01,
        max_no_terms_per_iteration=20, max_no_terms=200,
        growth_policy=GrowthPolicy.GROW)
    # Evaluation: a single 80/20 split; report coefficients above 0.01 at 2 decimals.
    EvaluationTools.evaluate_multiple_times(dipnn, X, Y, 1, 0.2, 0.01, 2)
    print(f'The margin: {dipnn.ro}')
def experiment_growth_and_pruning_degree5():
    """Degree-5 experiment with a tight term budget (max_no_terms=20).

    NOTE(review): despite the "pruning" in the name, this uses
    GrowthPolicy.GROW, and the function is redefined later in this file with
    PRUNE_AND_GROW — this first definition is shadowed; confirm which was
    intended.
    """
    # Data: one degree-5 and one degree-4 conjunction over 20 boolean variables.
    X, Y = boolean_concept_uniform_distribution([[1, 3, 10, 15, 16], [2, 4, 8, 12]], 0.75, 1000, 20)
    # Model: grow terms up to degree 5, keeping at most 20 terms.
    dipnn = DeepInterpretablePolynomialNeuralNetwork(
        d_max=5, lambda_param=1.0, balance=1.0, fixed_margin=False, ro=1.0,
        derivative_magnitude_th=0.0, coeff_magnitude_th=0.01,
        max_no_terms_per_iteration=20, max_no_terms=20,
        growth_policy=GrowthPolicy.GROW)
    # Evaluation: 100 random 80/20 splits; report coefficients above 0.01 at 2 decimals.
    EvaluationTools.evaluate_multiple_times(dipnn, X, Y, 100, 0.2, 0.01, 2)
    print(f'The margin: {dipnn.ro}')
def experiment_growth_and_pruning_degree2():
    """In paper: table 2, line."""
    # Data: labels defined by two degree-2 conjunctions over 20 boolean variables.
    X, Y = boolean_concept_uniform_distribution([[1, 2], [3, 4]], 0.75, 1000, 20)
    # Model: alternate pruning and growth of terms up to degree 2.
    dipnn = DeepInterpretablePolynomialNeuralNetwork(
        d_max=2, lambda_param=1.0, balance=1.0, fixed_margin=False, ro=1.0,
        derivative_magnitude_th=0.0, coeff_magnitude_th=0.01,
        max_no_terms_per_iteration=20, max_no_terms=200,
        growth_policy=GrowthPolicy.PRUNE_AND_GROW)
    # Evaluation: 100 random 80/20 splits; report coefficients above 0.01 at 2 decimals.
    EvaluationTools.evaluate_multiple_times(dipnn, X, Y, 100, 0.2, 0.01, 2)
    print(f'The margin: {dipnn.ro}')
def experiment_growth_and_pruning_degree3():
    """In paper: table 2, line."""
    # Data: labels defined by two degree-3 conjunctions over 20 boolean variables.
    X, Y = boolean_concept_uniform_distribution([[1, 2, 10], [3, 4, 8]], 0.75, 1000, 20)
    # Model: alternate pruning and growth of terms up to degree 3.
    dipnn = DeepInterpretablePolynomialNeuralNetwork(
        d_max=3, lambda_param=1.0, balance=1.0, fixed_margin=False, ro=1.0,
        derivative_magnitude_th=0.0, coeff_magnitude_th=0.01,
        max_no_terms_per_iteration=20, max_no_terms=200,
        growth_policy=GrowthPolicy.PRUNE_AND_GROW)
    # Evaluation: 100 random 80/20 splits; report coefficients above 0.01 at 2 decimals.
    EvaluationTools.evaluate_multiple_times(dipnn, X, Y, 100, 0.2, 0.01, 2)
    print(f'The margin: {dipnn.ro}')
def experiment_growth_and_pruning_degree4():
    """Prune-and-grow experiment with two overlapping degree-4 conjunctions."""
    # Data: the two conjunctions share variable 1.
    X, Y = boolean_concept_uniform_distribution([[1, 2, 10, 15], [1, 4, 8, 12]], 0.75, 1000, 20)
    # Model: alternate pruning and growth of terms up to degree 4.
    dipnn = DeepInterpretablePolynomialNeuralNetwork(
        d_max=4, lambda_param=1.0, balance=1.0, fixed_margin=False, ro=1.0,
        derivative_magnitude_th=0.0, coeff_magnitude_th=0.01,
        max_no_terms_per_iteration=20, max_no_terms=200,
        growth_policy=GrowthPolicy.PRUNE_AND_GROW)
    # Evaluation: 100 random 80/20 splits; report coefficients above 0.01 at 2 decimals.
    EvaluationTools.evaluate_multiple_times(dipnn, X, Y, 100, 0.2, 0.01, 2)
    print(f'The margin: {dipnn.ro}')
def experiment_growth_and_pruning_degree5():
    """Prune-and-grow, degree 5, with a tight term budget (max_no_terms=20).

    NOTE(review): this redefines (and shadows) an earlier function of the
    same name that used GrowthPolicy.GROW — confirm the duplication is
    intentional.
    """
    # Data: one degree-5 and one degree-4 conjunction over 20 boolean variables.
    X, Y = boolean_concept_uniform_distribution([[1, 3, 10, 15, 16], [2, 4, 8, 12]], 0.75, 1000, 20)
    # Model: alternate pruning and growth up to degree 5, keeping at most 20 terms.
    dipnn = DeepInterpretablePolynomialNeuralNetwork(
        d_max=5, lambda_param=1.0, balance=1.0, fixed_margin=False, ro=1.0,
        derivative_magnitude_th=0.0, coeff_magnitude_th=0.01,
        max_no_terms_per_iteration=20, max_no_terms=20,
        growth_policy=GrowthPolicy.PRUNE_AND_GROW)
    # Evaluation: 100 random 80/20 splits; report coefficients above 0.01 at 2 decimals.
    EvaluationTools.evaluate_multiple_times(dipnn, X, Y, 100, 0.2, 0.01, 2)
    print(f'The margin: {dipnn.ro}')
def experiment_all_terms_degree2_no_fixed_margin():
    """All-terms degree-2 run with an adaptive margin and no coefficient threshold."""
    # Data: two overlapping degree-2 conjunctions (both contain variable 1).
    X, Y = boolean_concept_uniform_distribution([[1, 2], [1, 3]], 0.5, 1000, 20)
    # Model: heavier regularization (lambda=10) and class balance 2.
    dipnn = DeepInterpretablePolynomialNeuralNetwork(
        d_max=2, lambda_param=10.0, balance=2.0, fixed_margin=False, ro=1.0,
        derivative_magnitude_th=0.0, coeff_magnitude_th=0.0,
        max_no_terms_per_iteration=10, max_no_terms=200,
        growth_policy=GrowthPolicy.ALL_TERMS)
    # Evaluation: a single 80/20 split; report coefficients above 0.01.
    EvaluationTools.evaluate_multiple_times(dipnn, X, Y, 1, 0.2, 0.01)
    print(f'The margin: {dipnn.ro}')
def experiment_growth_degree2_no_fixed_margin():
    """Growth degree-2 run with an adaptive margin and no coefficient threshold.

    NOTE(review): a function with this same name is defined again further
    down in the file, shadowing this one — confirm which is intended.
    """
    # Data: three overlapping degree-2 conjunctions.
    X, Y = boolean_concept_uniform_distribution([[1, 2], [1, 3], [2, 4]], 0.5, 1000, 20)
    # Model: grow terms iteratively up to degree 2.
    dipnn = DeepInterpretablePolynomialNeuralNetwork(
        d_max=2, lambda_param=1.0, balance=1.0, fixed_margin=False, ro=1.0,
        derivative_magnitude_th=0.0, coeff_magnitude_th=0.0,
        max_no_terms_per_iteration=10, max_no_terms=200,
        growth_policy=GrowthPolicy.GROW)
    # Evaluation: 10 random 80/20 splits; report coefficients above 0.01.
    EvaluationTools.evaluate_multiple_times(dipnn, X, Y, 10, 0.2, 0.01)
    print(f'The margin: {dipnn.ro}')
def experiment_growth_degree2_no_fixed_margin():
# Data
conjunctions = [[1,2],[1,3], [2,4]]
p = 0.5
dataset_size = 1000
no_variables = 20
X, Y = boolean_concept_uniform_distribution(conjunctions, p, dataset_size, no_variables)
# Model
d_max = 2
balance = 1.0
lambda_param = 1.0
ro = 1.0
fixed_margin = False
dipnn = DeepInterpretablePolynomialNeuralNetwork(d_max=d_max, lambda_param=lambda_param, balance=balance, fixed_margin=fixed_margin, | |
of the transform would have the following form::
{
DataKeys.INPUT: ...,
DataKeys.TARGET: ...,
DataKeys.METADATA: ...,
}
You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:
.. code-block:: python
from flash.core.data.transforms import ApplyToKeys
class MyInputTransform(InputTransform):
def per_batch_transform_on_device(self) -> Callable:
return ApplyToKeys("input", my_func)
"""
pass
def val_input_per_batch_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "input" key of each single sample
on device for the validating stage."""
pass
def val_target_per_batch_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "target" key of each single sample
on device for the validating stage."""
pass
def test_per_batch_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on a batch of data on device for the testing stage.
The input data of the transform would have the following form::
{
DataKeys.INPUT: ...,
DataKeys.TARGET: ...,
DataKeys.METADATA: ...,
}
"""
pass
def test_input_per_batch_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "input" key of each single sample
on device for the testing stage."""
pass
def test_target_per_batch_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "target" key of each single sample
on device for the testing stage."""
pass
def predict_per_batch_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on a batch of data on device for the predicting stage.
The input data of the transform would have the following form::
{
DataKeys.INPUT: ...,
DataKeys.TARGET: ...,
DataKeys.METADATA: ...,
}
You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:
.. code-block:: python
from flash.core.data.transforms import ApplyToKeys
class MyInputTransform(InputTransform):
def per_batch_transform_on_device(self) -> Callable:
return ApplyToKeys("input", my_func)
"""
pass
def predict_input_per_batch_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "input" key of each single sample
on device for the predicting stage."""
pass
def predict_target_per_batch_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "target" key of each single sample
on device for the predicting stage."""
pass
###########
# COLLATE #
###########
def train_collate(self) -> Callable:
"""Defines the transform to be applied on a list of training sample to create a training batch."""
return default_collate
def val_collate(self) -> Callable:
"""Defines the transform to be applied on a list of validating sample to create a validating batch."""
return default_collate
def test_collate(self) -> Callable:
"""Defines the transform to be applied on a list of testing sample to create a testing batch."""
return default_collate
def predict_collate(self) -> Callable:
"""Defines the transform to be applied on a list of predicting sample to create a predicting batch."""
return default_collate
def serve_collate(self) -> Callable:
"""Defines the transform to be applied on a list of serving sample to create a serving batch."""
return default_collate
def collate(self) -> Callable:
"""Defines the transform to be applied on a list of sample to create a batch for all stages."""
return default_collate
########################################
# HOOKS CALLED INTERNALLY WITHIN FLASH #
########################################
def _per_sample_transform(self, sample: Any, stage: RunningStage) -> Any:
fn = self.current_transform(stage=stage, current_fn="per_sample_transform")
if isinstance(sample, list):
return [fn(s) for s in sample]
return fn(sample)
def _per_batch_transform(self, batch: Any, stage: RunningStage) -> Any:
"""Transforms to apply to a whole batch (if possible use this for efficiency).
.. note:: This option is mutually exclusive with :meth:`per_sample_transform_on_device`, since if both are
specified, uncollation has to be applied.
"""
return self.current_transform(stage=stage, current_fn="per_batch_transform")(batch)
def _collate(self, samples: Sequence, stage: RunningStage) -> Any:
"""Transform to convert a sequence of samples to a collated batch."""
return self.current_transform(stage=stage, current_fn="collate")(samples)
def _per_sample_transform_on_device(self, sample: Any, stage: RunningStage) -> Any:
"""Transforms to apply to the data before the collation (per-sample basis).
.. note:: This option is mutually exclusive with :meth:`per_batch_transform`, since if both are
specified, uncollation has to be applied. .. note:: This function won't be called within the dataloader
workers, since to make that happen each of the workers would have to create it's own CUDA-context which
would pollute GPU memory (if on GPU).
"""
fn = self.current_transform(stage=stage, current_fn="per_sample_transform_on_device")
if isinstance(sample, list):
return [fn(s) for s in sample]
return fn(sample)
def _per_batch_transform_on_device(self, batch: Any, stage: RunningStage) -> Any:
"""Transforms to apply to a whole batch (if possible use this for efficiency).
.. note:: This function won't be called within the dataloader workers, since to make that happen each of
the workers would have to create it's own CUDA-context which would pollute GPU memory (if on GPU).
"""
return self.current_transform(stage=stage, current_fn="per_batch_transform_on_device")(batch)
#############
# UTILITIES #
#############
def inject_collate_fn(self, collate_fn: Callable):
# For all the stages possible, set collate function
if collate_fn is not default_collate:
for stage in RunningStage:
if stage not in [RunningStage.SANITY_CHECKING, RunningStage.TUNING]:
self._transform[stage].transforms[InputTransformPlacement.COLLATE.value] = collate_fn
def _populate_transforms_for_stage(self, running_stage: RunningStage):
transform, collate_in_worker = self.__check_transforms(
transform=self.__resolve_transforms(running_stage), stage=running_stage
)
if self._transform is None:
self._transform = {}
self._transform[running_stage] = _InputTransformPerStage(
collate_in_worker_from_transform=collate_in_worker,
transforms=transform,
)
    def __resolve_transforms(self, running_stage: RunningStage) -> Optional[Dict[str, Callable]]:
        """Collect the transform callables declared for ``running_stage``.

        For every placement hook (per_sample_transform, collate, ...) this
        resolves the stage-specialized hook name (e.g. ``train_collate``) and
        the per-key variants (e.g. ``train_input_per_sample_transform``),
        wraps per-key hooks in ``ApplyToKeys``, and composes multiple hooks
        with ``Compose``.

        Raises:
            MisconfigurationException: when the plain hook and the per-key hook
                are overridden at the same specialization level, or when a hook
                does not return a callable.
            AttributeError: when a hook is missing because ``super().__init__``
                was not called after setting attributes.
        """
        # Imported locally to avoid a circular import with the data pipeline.
        from flash.core.data.data_pipeline import DataPipeline
        transforms_out = {}
        stage = _STAGES_PREFIX[running_stage]
        # iterate over all transforms hook name
        for transform_name in InputTransformPlacement:
            transforms = {}
            transform_name = transform_name.value
            # iterate over all prefixes
            for key in ApplyToKeyPrefix:
                # get the resolved hook name based on the current stage
                resolved_name = DataPipeline._resolve_function_hierarchy(
                    transform_name, self, running_stage, InputTransform
                )
                # check if the hook name is specialized
                is_specialized_name = resolved_name.startswith(stage)
                # get the resolved hook name for apply to key on the current stage
                resolved_apply_to_key_name = DataPipeline._resolve_function_hierarchy(
                    f"{key}_{transform_name}", self, running_stage, InputTransform
                )
                # check if resolved hook name for apply to key is specialized
                is_specialized_apply_to_key_name = resolved_apply_to_key_name.startswith(stage)
                # check if they are overridden by the user
                resolve_name_overridden = DataPipeline._is_overridden(resolved_name, self, InputTransform)
                resolved_apply_to_key_name_overridden = DataPipeline._is_overridden(
                    resolved_apply_to_key_name, self, InputTransform
                )
                if resolve_name_overridden and resolved_apply_to_key_name_overridden:
                    # if both are specialized or both aren't specialized, raise a exception
                    # It means there is priority to specialize hooks name.
                    if not (is_specialized_name ^ is_specialized_apply_to_key_name):
                        raise MisconfigurationException(
                            f"Only one of {resolved_name} or {resolved_apply_to_key_name} can be overridden."
                        )
                    # The more-specialized of the two overrides wins.
                    method_name = resolved_name if is_specialized_name else resolved_apply_to_key_name
                else:
                    method_name = resolved_apply_to_key_name if resolved_apply_to_key_name_overridden else resolved_name
                # get associated transform
                try:
                    fn = getattr(self, method_name)()
                except AttributeError as e:
                    raise AttributeError(str(e) + ". Hint: Call super().__init__(...) after setting all attributes.")
                if fn is None:
                    continue
                if not callable(fn):
                    raise MisconfigurationException(f"The hook {method_name} should return a function.")
                # wrap apply to key hook into `ApplyToKeys` with the associated key.
                if method_name == resolved_apply_to_key_name:
                    fn = ApplyToKeys(key.value, fn)
                # A non-key hook resolves to the same method_name for every key;
                # only register it once.
                if method_name not in transforms:
                    transforms[method_name] = fn
            # store the transforms.
            if transforms:
                transforms = list(transforms.values())
                transforms_out[transform_name] = Compose(transforms) if len(transforms) > 1 else transforms[0]
        return transforms_out
def __check_transforms(
self, transform: Optional[Dict[str, Callable]], stage: RunningStage
) -> Tuple[Optional[Dict[str, Callable]], Optional[bool]]:
if transform is None:
return transform
keys_diff = set(transform.keys()).difference([v.value for v in InputTransformPlacement])
if len(keys_diff) > 0:
raise MisconfigurationException(
f"{stage}_transform contains {keys_diff}. Only {_INPUT_TRANSFORM_FUNCS} keys are supported."
)
is_per_batch_transform_in = "per_batch_transform" in transform
is_per_sample_transform_on_device_in = "per_sample_transform_on_device" in transform
if is_per_batch_transform_in and is_per_sample_transform_on_device_in:
raise MisconfigurationException(
f"{transform}: `per_batch_transform` and `per_sample_transform_on_device` are mutually exclusive."
)
collate_in_worker: Optional[bool] = None
if is_per_batch_transform_in or (not is_per_batch_transform_in and not is_per_sample_transform_on_device_in):
collate_in_worker = True
elif is_per_sample_transform_on_device_in:
collate_in_worker = False
return transform, collate_in_worker
@staticmethod
def _identity(x: Any) -> Any:
return x
def __str__(self) -> str:
return f"{self.__class__.__name__}(" + f"running_stage={self.running_stage}, transform={self._transform})"
def __getitem__(self, placement: InputTransformPlacement) -> Callable:
return self._transform[placement]
@dataclass
class LambdaInputTransform(InputTransform):
    """``InputTransform`` applying a user-supplied callable to every sample."""

    # Defaults to the identity so an unconfigured instance is a no-op.
    transform: Callable = InputTransform._identity

    def per_sample_transform(self) -> Callable:
        """Return the wrapped callable."""
        return self.transform
def create_or_configure_input_transform(
    transform: INPUT_TRANSFORM_TYPE,
    transform_kwargs: Optional[Dict] = None,
) -> Optional[InputTransform]:
    """Normalize ``transform`` into an :class:`InputTransform` instance.

    Accepts an instance (returned unchanged), an ``InputTransform`` subclass
    (deprecated; instantiated with ``transform_kwargs``), a ``functools.partial``
    (called with ``transform_kwargs``), a plain callable (wrapped in
    :class:`LambdaInputTransform`), or a falsy value (yields ``None``).

    Raises:
        MisconfigurationException: for any other value.
    """
    kwargs = transform_kwargs or {}
    if isinstance(transform, InputTransform):
        return transform
    if inspect.isclass(transform) and issubclass(transform, InputTransform):
        # Deprecation Warning
        rank_zero_warn(
            "Please pass an instantiated object of the `InputTransform` class. Passing the Class and keyword arguments"
            " separately has been deprecated since v0.8.0 and will be removed in v0.9.0.",
            stacklevel=8,
            category=FutureWarning,
        )
        return transform(**kwargs)
    if isinstance(transform, partial):
        return transform(**kwargs)
    if isinstance(transform, Callable):
        return LambdaInputTransform(transform=transform, **kwargs)
    if not transform:
        return None
    raise MisconfigurationException(f"The format for the transform isn't correct. Found {transform}")
class _InputTransformProcessor:
"""
This class is used to encapsulate the following functions of an `InputTransform` | |
remove the added hook by calling
``handle.remove()``.
"""
handle = RemovableHandle(self._mock_hooks)
self._mock_hooks[handle.id] = hook
return handle
def register_intern_hook(self, hook: ActionHook) -> RemovableHandle:
"""Registers an intern hook on the exporter.
The hook will be called each time a module matches against an :meth:`intern` pattern.
It should have the following signature::
hook(exporter: PackageExporter, module_name: str) -> None
Hooks will be called in order of registration.
Returns:
:class:`torch.utils.hooks.RemovableHandle`:
A handle that can be used to remove the added hook by calling
``handle.remove()``.
"""
handle = RemovableHandle(self._intern_hooks)
self._intern_hooks[handle.id] = hook
return handle
def intern(
self,
include: "GlobPattern",
*,
exclude: "GlobPattern" = (),
allow_empty: bool = True,
):
"""Specify modules that should be packaged. A module must match some ``intern`` pattern in order to be
included in the package and have its dependencies processed recursively.
Args:
include (Union[List[str], str]): A string e.g. "my_package.my_subpackage", or list of strings
for the names of the modules to be externed. This can also be a glob-style pattern, as described in :meth:`mock`.
exclude (Union[List[str], str]): An optional pattern that excludes some patterns that match the include string.
allow_empty (bool): An optional flag that specifies whether the intern modules specified by this call
to the ``intern`` method must be matched to some module during packaging. If an ``intern`` module glob
pattern is added with ``allow_empty=False``, and :meth:`close` is called (either explicitly or via ``__exit__``)
before any modules match that pattern, an exception is thrown. If ``allow_empty=True``, no such exception is thrown.
"""
self.patterns[GlobGroup(include, exclude=exclude)] = _PatternInfo(
_ModuleProviderAction.INTERN, allow_empty
)
def mock(
self,
include: "GlobPattern",
*,
exclude: "GlobPattern" = (),
allow_empty: bool = True,
):
"""Replace some required modules with a mock implementation. Mocked modules will return a fake
object for any attribute accessed from it. Because we copy file-by-file, the dependency resolution will sometimes
find files that are imported by model files but whose functionality is never used
(e.g. custom serialization code or training helpers).
Use this function to mock this functionality out without having to modify the original code.
Args:
include (Union[List[str], str]): A string e.g. ``"my_package.my_subpackage"``, or list of strings
for the names of the modules to be mocked out. Strings can also be a glob-style pattern
string that may match multiple modules. Any required dependencies that match this pattern
string will be mocked out automatically.
Examples :
``'torch.**'`` -- matches ``torch`` and all submodules of torch, e.g. ``'torch.nn'``
and ``'torch.nn.functional'``
``'torch.*'`` -- matches ``'torch.nn'`` or ``'torch.functional'``, but not
``'torch.nn.functional'``
exclude (Union[List[str], str]): An optional pattern that excludes some patterns that match the include string.
e.g. ``include='torch.**', exclude='torch.foo'`` will mock all torch packages except ``'torch.foo'``,
Default: is ``[]``.
allow_empty (bool): An optional flag that specifies whether the mock implementation(s) specified by this call
to the :meth:`mock` method must be matched to some module during packaging. If a mock is added with
``allow_empty=False``, and :meth:`close` is called (either explicitly or via ``__exit__``) and the mock has
not been matched to a module used by the package being exported, an exception is thrown.
If ``allow_empty=True``, no such exception is thrown.
"""
self.patterns[GlobGroup(include, exclude=exclude)] = _PatternInfo(
_ModuleProviderAction.MOCK, allow_empty
)
def extern(
self,
include: "GlobPattern",
*,
exclude: "GlobPattern" = (),
allow_empty: bool = True,
):
"""Include ``module`` in the list of external modules the package can import.
This will prevent dependency discovery from saving
it in the package. The importer will load an external module directly from the standard import system.
Code for extern modules must also exist in the process loading the package.
Args:
include (Union[List[str], str]): A string e.g. ``"my_package.my_subpackage"``, or list of strings
for the names of the modules to be externed. This can also be a glob-style pattern, as
described in :meth:`mock`.
exclude (Union[List[str], str]): An optional pattern that excludes some patterns that match the
include string.
allow_empty (bool): An optional flag that specifies whether the extern modules specified by this call
to the ``extern`` method must be matched to some module during packaging. If an extern module glob
pattern is added with ``allow_empty=False``, and :meth:`close` is called (either explicitly or via
``__exit__``) before any modules match that pattern, an exception is thrown. If ``allow_empty=True``,
no such exception is thrown.
"""
self.patterns[GlobGroup(include, exclude=exclude)] = _PatternInfo(
_ModuleProviderAction.EXTERN, allow_empty
)
def deny(self, include: "GlobPattern", *, exclude: "GlobPattern" = ()):
"""Blocklist modules who names match the given glob patterns from the list of modules the package can import.
If a dependency on any matching packages is found, a :class:`PackagingError` is raised.
Args:
include (Union[List[str], str]): A string e.g. ``"my_package.my_subpackage"``, or list of strings
for the names of the modules to be externed. This can also be a glob-style pattern, as described in :meth:`mock`.
exclude (Union[List[str], str]): An optional pattern that excludes some patterns that match the include string.
"""
self.patterns[GlobGroup(include, exclude=exclude)] = _PatternInfo(
_ModuleProviderAction.DENY, allow_empty=True
)
def _persistent_id(self, obj):
    # Pickler hook: decide whether `obj` should be serialized by reference
    # instead of by value. Returning None falls back to normal pickling.
    if torch.is_storage(obj):
        storage_type = normalize_storage_type(type(obj))
        location = location_tag(obj)

        # serialize storage if not already written
        storage_present = self.storage_context.has_storage(obj)
        storage_id = self.storage_context.get_or_add_storage(obj)
        if not storage_present:
            if obj.device.type != "cpu":
                # Storage records are always written as CPU bytes.
                obj = obj.cpu()
            num_bytes = obj.size() * obj.element_size()
            self.zip_file.write_record(
                f".data/{storage_id}.storage", obj.data_ptr(), num_bytes
            )
        return ("storage", storage_type, storage_id, location, obj.size())

    if hasattr(obj, "__reduce_package__"):
        # Objects that know how to package themselves (e.g. ScriptModules).
        if _gate_torchscript_serialization and isinstance(
            obj, torch.jit.RecursiveScriptModule
        ):
            raise Exception(
                "Serializing ScriptModules directly into a package is a beta feature. "
                "To use, set global "
                "`torch.package.package_exporter._gate_torchscript_serialization` to `False`."
            )
        # Memoize by id() so each object is reduced at most once per export.
        if self.serialized_reduces.get(id(obj)) is None:
            self.serialized_reduces[id(obj)] = (
                "reduce_package",
                id(obj),
                *obj.__reduce_package__(self),
            )
        return self.serialized_reduces[id(obj)]

    return None
def __enter__(self):
    """Enter a ``with`` block; the exporter itself is the context object."""
    return self
def __exit__(self, exc_type, exc_value, traceback):
    """Finish the ``with`` block.

    On a clean exit the package is finalized via :meth:`close`. If an
    exception is propagating we do not finalize; we only put the underlying
    zip buffer into a valid state and return control to the caller so the
    exception continues to raise.
    """
    if exc_type is None:
        self.close()
    else:
        # Do the bare minimum to leave the open buffer in a valid state.
        self._finalize_zip()
def _write(self, filename, str_or_bytes):
    """Write ``str_or_bytes`` into the archive at path ``filename``.

    Strings are encoded as UTF-8 before writing. Each archive path may be
    written at most once, and mangled (already-packaged) module names are
    rejected.
    """
    if filename in self._written_files:
        # Bug fix: the f-strings previously contained no placeholder, so the
        # error messages never named the offending file.
        raise AssertionError(
            f"Tried to write file '{filename}', but it already exists in this archive. "
            "Please file a bug."
        )
    self._written_files.add(filename)

    if is_mangled(filename):
        raise AssertionError(
            f"Tried to save a torch.package'd module as '{filename}'. "
            "Directly saving torch.package'd modules is not allowed."
        )
    if isinstance(str_or_bytes, str):
        str_or_bytes = str_or_bytes.encode("utf-8")
    self.zip_file.write_record(filename, str_or_bytes, len(str_or_bytes))
def _validate_dependency_graph(self):
    """Raise if the dependency graph is not in a packageable state."""
    # 1. Any node tagged with an "error" attribute during dependency
    # analysis invalidates the whole export.
    if any("error" in attrs for attrs in self.dependency_graph.nodes.values()):
        raise PackagingError(self.dependency_graph)

    # 2. Every pattern registered with allow_empty=False must have matched
    # at least one module.
    for pattern, info in self.patterns.items():
        if not (info.allow_empty or info.was_matched):
            raise EmptyMatchError(
                f"Exporter did not match any modules to {pattern}, which was marked as allow_empty=False"
            )
def _write_mock_file(self):
    """Copy ``_mock.py`` into the package (as module ``_mock``), at most once."""
    if "_mock.py" in self._written_files:
        return
    mock_source = _read_file(str(Path(__file__).parent / "_mock.py"))
    self._write_source_string("_mock", mock_source, is_package=False)
def _execute_dependency_graph(self):
    """Takes a finalized dependency graph describing how to package all
    modules and executes it, writing to the ZIP archive.

    Raises:
        PackagingError / EmptyMatchError: via :meth:`_validate_dependency_graph`.
        AssertionError: on internal inconsistencies (unprovided intern module
            or unknown action).
    """
    self._validate_dependency_graph()

    extern_modules = []
    # Removed unused local `_mock_written`; mock-file deduplication is
    # handled inside _write_mock_file via self._written_files.
    for module_name, attrs in self.dependency_graph.nodes.items():
        action = attrs["action"]

        if action == _ModuleProviderAction.EXTERN:
            for hook in self._extern_hooks.values():
                hook(self, module_name)
            extern_modules.append(module_name)
        elif action == _ModuleProviderAction.MOCK:
            for hook in self._mock_hooks.values():
                hook(self, module_name)
            self._write_mock_file()
            is_package = hasattr(self._import_module(module_name), "__path__")
            self._write_source_string(module_name, _MOCK_IMPL, is_package)
        elif action == _ModuleProviderAction.INTERN:
            for hook in self._intern_hooks.values():
                hook(self, module_name)
            # The node in the dependency graph contains metadata that tells us
            # how to intern the module.
            if "provided" not in attrs:
                raise AssertionError(
                    f"Module was marked `intern` but not provided: {module_name}"
                )
            if attrs.get("is_pickle") is True:
                # This node came from save_source_pickle, we don't need to
                # write any source for it.
                continue
            is_package = attrs["is_package"]
            source = attrs["source"]
            self._write_source_string(module_name, source, is_package)
        elif action == _ModuleProviderAction.REPACKAGED_MOCK_MODULE:
            self._write_mock_file()
        else:
            raise AssertionError(
                f"Invalid action: {module_name}, {action}. Please report a bug to PyTorch."
            )

    # Record the externed modules so the importer knows which modules to
    # load from the host environment.
    extern_file_contents = "\n".join(extern_modules) + "\n"
    self._write(".data/extern_modules", extern_file_contents)
def close(self):
"""Write the package to the filesystem. Any calls after :meth:`close` are now invalid.
It is preferable to use resource guard syntax instead::
with PackageExporter("file.zip") as e:
| |
<reponame>JOFLIX/grapevines<gh_stars>0
# Copyright 2011-2017, <NAME> and The Tor Project
# See LICENSE for licensing information
"""
Handlers for text configuration files. Configurations are simple string to
string mappings, with the configuration files using the following rules...
* the key/value is separated by a space
* anything after a '#' is ignored as a comment
* excess whitespace is trimmed
* empty lines are ignored
* multi-line values can be defined by following the key with lines starting
with a '|'
For instance...
::
# This is my sample config
user.name Galen
user.password y<PASSWORD> # here's an inline comment
user.notes takes a fancy to pepperjack cheese
blankEntry.example
msg.greeting
|Multi-line message exclaiming of the
|wonder and awe that is pepperjack!
... would be loaded as...
::
config = {
'user.name': 'Galen',
'user.password': '<PASSWORD>',
'user.notes': 'takes a fancy to pepperjack cheese',
'blankEntry.example': '',
'msg.greeting': 'Multi-line message exclaiming of the\\nwonder and awe that is pepperjack!',
}
Configurations are managed via the :class:`~stem.util.conf.Config` class. The
:class:`~stem.util.conf.Config` can be used directly with its
:func:`~stem.util.conf.Config.get` and :func:`~stem.util.conf.Config.set`
methods, but usually modules will want a local dictionary with just the
configurations that it cares about.
To do this use the :func:`~stem.util.conf.config_dict` function. For example...
::
import getpass
from stem.util import conf, connection
def config_validator(key, value):
if key == 'timeout':
# require at least a one second timeout
return max(1, value)
elif key == 'endpoint':
if not connection.is_valid_ipv4_address(value):
raise ValueError("'%s' isn't a valid IPv4 address" % value)
elif key == 'port':
if not connection.is_valid_port(value):
raise ValueError("'%s' isn't a valid port" % value)
elif key == 'retries':
# negative retries really don't make sense
return max(0, value)
CONFIG = conf.config_dict('ssh_login', {
'username': getpass.getuser(),
'password': '',
'timeout': 10,
'endpoint': '263.12.8.0',
'port': 22,
'reconnect': False,
'retries': 3,
}, config_validator)
There's several things going on here so lets take it step by step...
* The :func:`~stem.util.conf.config_dict` provides a dictionary that's bound
to a given configuration. If the "ssh_login" configuration changes
then so will the contents of CONFIG.
* The dictionary we're passing to :func:`~stem.util.conf.config_dict` provides
two important pieces of information: default values and their types. See the
Config's :func:`~stem.util.conf.Config.get` method for how these type
inferences work.
* The config_validator is a hook we're adding to make sure CONFIG only gets
values we think are valid. In this case it ensures that our timeout value
is at least one second, and rejects endpoints or ports that are invalid.
Now lets say our user has the following configuration file...
::
username waddle_doo
password <PASSWORD>
timeout -15
port 9000000
retries lots
reconnect true
logging debug
... and we load it as follows...
::
>>> from stem.util import conf
>>> our_config = conf.get_config('ssh_login')
>>> our_config.load('/home/atagar/user_config')
>>> print CONFIG # doctest: +SKIP
{
"username": "waddle_doo",
"password": "<PASSWORD>",
"timeout": 1,
"endpoint": "263.12.8.0",
"port": 22,
"reconnect": True,
"retries": 3,
}
Here's an explanation of what happened...
* the username, password, and reconnect attributes took the values in the
configuration file
* the 'config_validator' we added earlier allows for a minimum timeout of one
and rejected the invalid port (with a log message)
* we weren't able to convert the retries' "lots" value to an integer so it kept
its default value and logged a warning
* the user didn't supply an endpoint so that remained unchanged
* our CONFIG didn't have a 'logging' attribute so it was ignored
**Module Overview:**
::
config_dict - provides a dictionary that's kept in sync with our config
get_config - singleton for getting configurations
uses_settings - provides an annotation for functions that use configurations
parse_enum_csv - helper function for parsing configuration entries for enums
Config - Custom configuration
|- load - reads a configuration file
|- save - writes the current configuration to a file
|- clear - empties our loaded configuration contents
|- add_listener - notifies the given listener when an update occurs
|- clear_listeners - removes any attached listeners
|- keys - provides keys in the loaded configuration
|- set - sets the given key/value pair
|- unused_keys - provides keys that have never been requested
|- get - provides the value for a given key, with type inference
+- get_value - provides the value for a given key as a string
"""
import inspect
import os
import threading
import stem.prereq
from stem.util import log
try:
# added in python 2.7
from collections import OrderedDict
except ImportError:
from stem.util.ordereddict import OrderedDict
CONFS = {} # mapping of identifier to singleton instances of configs
class _SyncListener(object):
  """Config listener that mirrors configuration updates into a bound dict.

  Only keys already present in the dictionary are synchronized; an optional
  interceptor may rewrite a value before it's stored.
  """

  def __init__(self, config_dict, interceptor):
    self.config_dict = config_dict
    self.interceptor = interceptor

  def update(self, config, key):
    if key not in self.config_dict:
      return  # not a key we mirror

    old_value = self.config_dict[key]
    new_value = config.get(key, old_value)

    if new_value == old_value:
      return  # no change

    if self.interceptor:
      interceptor_value = self.interceptor(key, new_value)

      if interceptor_value:
        new_value = interceptor_value

    self.config_dict[key] = new_value
def config_dict(handle, conf_mappings, handler = None):
  """
  Makes a dictionary that stays synchronized with a configuration.

  The given 'config_key => default_value' mappings are updated in place as
  the named configuration changes. A previous value is left alone if...

  * we don't have a value for that config_key
  * we can't convert our value to be the same type as the default_value

  If a handler is provided then it's called just prior to assigning new
  values to the dictionary. It receives the (key, value) for the new value
  and returns what we should actually insert. If it returns None then the
  value is updated as normal.

  For more information about how we convert types see our
  :func:`~stem.util.conf.Config.get` method.

  **The dictionary you get from this is managed by the Config class and
  should be treated as being read-only.**

  :param str handle: unique identifier for a config instance
  :param dict conf_mappings: config key/value mappings used as our defaults
  :param functor handler: function referred to prior to assigning values
  """

  listener = _SyncListener(conf_mappings, handler)
  get_config(handle).add_listener(listener.update)
  return conf_mappings
def get_config(handle):
  """
  Singleton constructor for configuration file instances. If a configuration
  already exists for the handle then it's returned. Otherwise a fresh
  instance is constructed and remembered.

  :param str handle: unique identifier used to access this config instance
  """

  try:
    return CONFS[handle]
  except KeyError:
    config = CONFS[handle] = Config()
    return config
def uses_settings(handle, path, lazy_load = True):
  """
  Provides a function that can be used as a decorator for other functions
  that require settings to be loaded. Functions with this decorator will be
  provided with the configuration as its 'config' keyword argument.

  .. versionchanged:: 1.3.0
     Omits the 'config' argument if the function we're decorating doesn't
     accept it.

  ::

    uses_settings = stem.util.conf.uses_settings('my_app', '/path/to/settings.cfg')

    @uses_settings
    def my_function(config):
      print 'hello %s!' % config.get('username', '')

  :param str handle: handle for the configuration
  :param str path: path where the configuration should be loaded from
  :param bool lazy_load: loads the configuration file when the decorator is
    used if true, otherwise it's loaded right away

  :returns: **function** that can be used as a decorator to provide the
    configuration

  :raises: **IOError** if we fail to read the configuration file, if
    **lazy_load** is true then this arises when we use the decorator
  """

  config = get_config(handle)

  if not lazy_load and not config._settings_loaded:
    config.load(path)
    config._settings_loaded = True

  def decorator(func):
    # The signature can't change after decoration, so check it once here
    # rather than on every call.
    accepts_config = 'config' in inspect.getargspec(func).args

    def wrapped(*args, **kwargs):
      if lazy_load and not config._settings_loaded:
        config.load(path)
        config._settings_loaded = True

      if accepts_config:
        return func(*args, config = config, **kwargs)

      return func(*args, **kwargs)

    return wrapped

  return decorator
def parse_enum(key, value, enumeration):
  """
  Provides the enumeration value for a given key. The lookup is case
  insensitive, and an exception is raised if the enum key doesn't exist.

  :param str key: configuration key being looked up
  :param str value: value to be parsed
  :param stem.util.enum.Enum enumeration: enumeration the values should be in

  :returns: enumeration value

  :raises: **ValueError** if the **value** isn't among the enumeration keys
  """

  (match,) = parse_enum_csv(key, value, enumeration, 1)
  return match
def parse_enum_csv(key, value, enumeration, count = None):
"""
Parses a given value as being a comma separated listing of enumeration keys,
returning the corresponding enumeration values. This is intended to be a
helper for config handlers. The checks this does are case insensitive.
The **count** attribute can be used to make assertions based on the number of
values. This can be...
| |
1], cmap='hot', interpolation='nearest')
# plt.show()
# plt.imshow(p[1, :, :, 2], cmap='hot', interpolation='nearest')
# plt.show()
# plt.imshow(p[1, :, :, 3], cmap='hot', interpolation='nearest')
# plt.show()
# print(p)
# print(np.argmax(p[0]))
# print(np.max(p, axis=1))
# print(p[0])
# print(np.argmax(p[0], axis=1))
# print(np.max(p[1], axis=1))
# print(np.argmax(p[1], axis=1))
# for pi in p[0]:
# print(pi)
# cents = to_local_average_cents(p[1])
# frequency = 10 * 2 ** (cents / 1200)
#
# for f in frequency:
# print(f)
# print('pred', frequency)
def test_pitch(task, tradition):
    """Run pitch extraction over the recordings of a dataset and write one
    ``.pitch`` file per recording (one frequency per line), placed in a
    ``pitches`` directory mirroring the ``audio`` directory.

    Args:
        task: section name of ``experiments.conf`` supplying model/config.
        tradition: dataset prefix used to look up '<tradition>_train' etc.

    NOTE(review): the unconditional ``break`` statements at the end of the
    ``while k`` and ``for t`` loops mean only the first recording of the
    'train' split is actually processed — this looks like leftover debug
    code; confirm before relying on full-dataset output.
    """
    config = pyhocon.ConfigFactory.parse_file("experiments.conf")[task]
    model = build_and_load_model(config, task)
    model.load_weights('model/model-full.h5', by_name=True)

    model_srate = config['model_srate']  # sample rate the model expects
    step_size = config['hop_size']       # hop between analysis frames
    cuttoff = config['cutoff']           # [sic] audio slice length per chunk
    for t in ['train', 'validate', 'test']:
        data_path = config[tradition + '_' + t]
        data = pd.read_csv(data_path, sep='\t')
        data = data.reset_index()
        slice_ind = 0
        k = 0
        while k < data.shape[0]:
            path = data.loc[k, 'path']
            # path = 'E:\\E2ERaga\\data\\RagaDataset\\audio\\f5999e30-d00d-4837-b9c6-5328768ae22d.wav'
            # Output file: same basename, 'audio' directory -> 'pitches'.
            pitch_path = path[:path.index('.wav')] + '.pitch'
            pitch_path = pitch_path.replace('audio', 'pitches')
            pitch_file = open(pitch_path, "w")
            # if os.path.exists(pitch_path):
            #     pitch_file = open(pitch_path, "a")
            # else:
            #     pitch_file = open(pitch_path, "w")
            pitches = []
            # Consume the file slice by slice; __data_generation_pitch
            # reports a next-slice index of 0 once the last slice is done.
            while True:
                if slice_ind == 0:
                    print(pitch_path)
                frames, slice_ind = __data_generation_pitch(path, slice_ind, model_srate, step_size, cuttoff)
                p = model.predict(np.array([frames]))
                # p = np.sum(np.reshape(p, [-1,6,60]), axis=1)
                # Activations -> cents -> Hz (10 Hz reference, 1200 cents/octave).
                cents = to_local_average_cents(p)
                frequency = 10 * 2 ** (cents / 1200)
                # for p1 in p:
                #     p1 = list(map(str, p1))
                #     p1 = ','.join(p1)
                #     pitches.append(p1)
                # p = ','.join(p)
                pitches.extend(frequency)
                # pitches.extend(p)
                # pitches.append(p)
                if slice_ind == 0:
                    k += 1
                    break
            # frequency = list(map(str, frequency))
            pitches = list(map(str, pitches))
            pitch_file.writelines('\n'.join(pitches))
            pitch_file.close()
            break
        break
def cache_cqt(task, tradition):
    """Precompute the CQT feature map for every recording in the dataset and
    cache all of them in one HDF5 file, keyed by each recording's MBID.

    NOTE(review): the cache path is hard-coded to the Hindustani dataset even
    though ``tradition`` is a parameter — confirm this is intentional.
    """
    config = pyhocon.ConfigFactory.parse_file("experiments.conf")[task]
    # "w" truncates: any existing cache is rebuilt from scratch.
    with h5py.File('data/RagaDataset/Hindustani/cqt_cache.hdf5', "w") as f:
        for t in ['train', 'validate', 'test']:
            data_path = config[tradition + '_' + t]
            data = pd.read_csv(data_path, sep='\t')
            data = data.reset_index()
            k = 0
            while k < data.shape[0]:
                path = data.loc[k, 'path']
                mbid = data.loc[k, 'mbid']
                cqt = get_cqt(path)
                f.create_dataset(mbid, data=cqt)  # one dataset per recording
                print(mbid, k)
                k += 1
def get_cqt(path):
    """Compute a 60-bin, octave-folded constant-Q magnitude map for a WAV file.

    Seven octaves (60 bins per octave, starting at C1) are computed,
    converted to dB, then averaged across octaves down to a single
    (60, time) chroma-like representation.
    """
    sample_rate, samples = wavfile.read(path)

    if len(samples.shape) == 2:
        samples = samples.mean(1)  # downmix stereo to mono

    magnitudes = np.abs(
        librosa.cqt(
            samples,
            sr=sample_rate,
            bins_per_octave=60,
            n_bins=60 * 7,
            pad_mode='wrap',
            fmin=librosa.note_to_hz('C1'),
        )
    )
    db_map = librosa.amplitude_to_db(magnitudes, ref=np.max)

    # Fold the 7 octaves onto each other: (7*60, t) -> (7, 60, t) -> (60, t).
    folded = np.reshape(db_map, [7, 60, -1])
    return np.mean(folded, axis=0)
def __data_generation_pitch(path, slice_ind, model_srate, step_size, cuttoff):
    """Load slice ``slice_ind`` of the WAV at ``path`` and frame it for the model.

    The audio is cut into chunks of ``cuttoff`` seconds; each chunk is split
    into overlapping 1024-sample frames spaced ``step_size`` seconds apart,
    then per-frame standardized (zero mean, unit-ish variance).

    Returns:
        (frames, next_slice_ind) — ``next_slice_ind`` is 0 when the final
        slice of the file has just been produced (callers treat that as the
        end-of-file marker).
    """
    sr, audio = wavfile.read(path)
    if len(audio.shape) == 2:
        audio = audio.mean(1)  # make mono
    # Bug fix: WAV data is typically int16; the in-place float subtraction
    # below would raise a numpy casting error on integer arrays. Convert to
    # float32 up front (as upstream crepe does).
    audio = audio.astype(np.float32)

    audio = np.pad(audio, 512, mode='constant', constant_values=0)
    audio_len = len(audio)
    audio = audio[slice_ind * model_srate * cuttoff:(slice_ind + 1) * model_srate * cuttoff]
    if (slice_ind + 1) * model_srate * cuttoff >= audio_len:
        slice_ind = -1  # last slice; the returned index will be 0

    # Frame the slice into overlapping 1024-sample windows via strides,
    # then copy so the frames own their memory.
    hop_length = int(model_srate * step_size)
    n_frames = 1 + int((len(audio) - 1024) / hop_length)
    frames = as_strided(audio, shape=(1024, n_frames),
                        strides=(audio.itemsize, hop_length * audio.itemsize))
    frames = frames.transpose().copy()

    # Per-frame standardization; the epsilon guards against silent frames.
    frames -= np.mean(frames, axis=1)[:, np.newaxis]
    frames /= (np.std(frames, axis=1)[:, np.newaxis] + 1e-5)

    return frames, slice_ind + 1
def get_chroma(audio, sr):
    """Compute a 60-bin CENS chromagram for ``audio`` at sample rate ``sr``."""
    frame_hop = 512
    return librosa.feature.chroma_cens(
        audio, sr=sr, hop_length=frame_hop, n_chroma=60, bins_per_octave=60
    )
def predict(audio, sr, model_capacity='full',
            viterbi=False, center=True, step_size=10, verbose=1):
    """
    Perform pitch estimation on given audio

    Parameters
    ----------
    audio : np.ndarray [shape=(N,) or (N, C)]
        The audio samples. Multichannel audio will be downmixed.
    sr : int
        Sample rate of the audio samples. The audio will be resampled if
        the sample rate is not 16 kHz, which is expected by the model.
    model_capacity : 'tiny', 'small', 'medium', 'large', or 'full'
        String specifying the model capacity; see the docstring of
        :func:`~crepe.core.build_and_load_model`
    viterbi : bool
        Apply viterbi smoothing to the estimated pitch curve. False by default.
    center : boolean
        - If `True` (default), the signal `audio` is padded so that frame
          `D[:, t]` is centered at `audio[t * hop_length]`.
        - If `False`, then `D[:, t]` begins at `audio[t * hop_length]`
    step_size : int
        The step size in milliseconds for running pitch estimation.
    verbose : int
        Set the keras verbosity mode: 1 (default) will print out a progress bar
        during prediction, 0 will suppress all non-error printouts.

    Returns
    -------
    A 5-tuple consisting of:

        time: np.ndarray [shape=(T,)]
            The timestamps on which the pitch was estimated
        frequency: np.ndarray [shape=(T,)]
            The predicted pitch values in Hz
        zarg: np.ndarray [shape=(T,)]
            The argmax activation bin folded into one octave and rescaled to
            the range [0, 12) — presumably a pitch-class (chroma) estimate;
            TODO(review) confirm against callers.
        confidence: np.ndarray [shape=(T,)]
            The confidence of voice activity, between 0 and 1
        activation: np.ndarray [shape=(T, 360)]
            The raw activation matrix
    """
    activation = get_activation(audio, sr, model_capacity=model_capacity,
                                center=center, step_size=step_size,
                                verbose=verbose)
    # Per-frame maximum activation serves as the voicing confidence.
    confidence = activation.max(axis=1)

    if viterbi:
        cents = to_viterbi_cents(activation)
    else:
        cents = to_local_average_cents(activation)

    # Cents (relative to a 10 Hz reference) -> Hz; unvoiced frames come back
    # as NaN and are zeroed.
    frequency = 10 * 2 ** (cents / 1200)
    frequency[np.isnan(frequency)] = 0

    time = np.arange(confidence.shape[0]) * step_size / 1000.0

    # z = np.reshape(activation, [-1, 6, 60])
    # z = np.mean(z, axis=1) #(None, 60)
    # z = np.reshape(z, [-1,12,5])
    # z = np.mean(z, axis=2) # (None, 12)

    # Fold the argmax bin into a single octave (60 bins = 12 semitones x 5
    # bins each) and rescale to semitone units.
    zarg = np.argmax(activation, axis=1)
    zarg = zarg % 60
    zarg = zarg / 5
    # ((((cents - 1997.3794084376191) / 20) % 60) / 5)
    return time, frequency, zarg, confidence, activation
def process_file(file, output=None, model_capacity='full', viterbi=False,
center=True, save_activation=False, save_plot=False,
plot_voicing=False, step_size=10, verbose=True):
"""
Use the input model to perform pitch estimation on the input file.
Parameters
----------
file : str
Path to WAV file to be analyzed.
output : str or None
Path to directory for saving output files. If None, output files will
be saved to the directory containing the input file.
model_capacity : 'tiny', 'small', 'medium', 'large', or 'full'
String specifying the model capacity; see the docstring of
:func:`~crepe.core.build_and_load_model`
viterbi : bool
Apply viterbi smoothing to the estimated pitch curve. False by default.
center : boolean
- If `True` (default), the signal `audio` is padded so that frame
`D[:, t]` is centered at `audio[t * hop_length]`.
- If `False`, then `D[:, t]` begins at `audio[t * hop_length]`
save_activation : bool
Save the output activation matrix to an .npy file. False by default.
save_plot : bool
Save a plot of the output activation matrix to a .png file. False by
default.
plot_voicing : bool
Include a visual representation of the voicing activity detection in
the plot of the output activation matrix. False by default, only
relevant if save_plot is True.
step_size : int
The step size in milliseconds for running pitch estimation.
verbose : bool
Print status messages and keras progress (default=True).
Returns
-------
"""
try:
sr, audio = wavfile.read(file)
except ValueError:
print("CREPE: Could not read %s" % file, file=sys.stderr)
raise
time, frequency, cents, confidence, activation = predict(
audio, sr,
model_capacity=model_capacity,
viterbi=viterbi,
center=center,
step_size=step_size,
verbose=1 * verbose)
# write prediction as TSV
f0_file = output_path(file, ".f0.csv", output)
f0_data = np.vstack([time, frequency, cents, confidence]).transpose()
np.savetxt(f0_file, f0_data, fmt=['%.3f', '%.3f', '%.6f', '%.6f'], delimiter=',',
header='time,frequency,cents,confidence', comments='')
if verbose:
print("CREPE: Saved the estimated frequencies and confidence values "
"at {}".format(f0_file))
# save the salience file to a .npy file
if save_activation:
activation_path = output_path(file, ".activation.npy", output)
np.save(activation_path, activation)
if verbose:
print("CREPE: Saved the activation matrix at {}".format(
activation_path))
# save the salience visualization in a PNG file
if save_plot:
import matplotlib.cm
from imageio import imwrite
plot_file = output_path(file, ".activation.png", output)
# to draw the low pitches in the bottom
salience = np.flip(activation, axis=1)
inferno = matplotlib.cm.get_cmap('inferno')
image = | |
<reponame>Horia73/MultiCuberX
from tkinter import *
from tkmacosx import Button
import math
import sys
class Window(object):
def __init__(self, master):
    """Configure the root window and display the cube-selection screen."""
    self.master = master
    root = self.master
    root.title("RUBIKon Project")
    root.geometry("1200x900+250+80")
    root.config(bg="#fff")
    root.resizable(0, 0)
    self.AddCubes()
def AddCubes(self):
    """Show the main menu: one button per supported cube size plus Exit."""
    self.TopFrame = Frame(self.master, bg="#42f498", width=1200, height=100)
    self.TopFrame.grid(column=0, row=1)
    self.TextLabel = Label(self.master, text="Please select a cube:", font=("Laksaman", 25), fg="#333", bg="#fff")
    self.TextLabel.grid(column=0, row=2, padx=10, pady=10)

    def menu_button(label, color, row, command):
        # All menu buttons share the same geometry; only text/color/row vary.
        button = Button(self.master, text=label, font=("Laksaman", 20),
                        width=400, height=80, bd=0, bg=color, fg="#333",
                        command=command)
        button.grid(column=0, row=row, padx=10, pady=10)
        return button

    self.x3 = menu_button("3x3x3", "#42f498", 3, self.cub3)
    self.x4 = menu_button("4x4x4", "#FFD700", 4, self.cub4)
    self.x5 = menu_button("5x5x5", "#FF7F50", 5, self.cub5)
    self.close = menu_button("Exit", "#4d79ff", 10, self.closeall)
def closeall(self):
    """Terminate the application from the main menu."""
    raise SystemExit
def clos3(self):
    """Shut down the 3x3x3 solver hardware, then quit the application."""
    cube3.close()
    raise SystemExit
def clos4(self):
    """Shut down the 4x4x4 solver hardware, then quit the application."""
    cube4.close()
    raise SystemExit
def clos5(self):
    """Shut down the 5x5x5 solver hardware, then quit the application."""
    cube5.close()
    raise SystemExit
def cub3(self):
    """Load the 3x3x3 solver, prepare it, and show its speed menu."""
    global cube3
    import cube3  # binds the module name at module scope (see `global` above)

    cube3.prepare()

    self.TextLabel = Label(self.master, text="Please select the speed of motors:", font=("Laksaman", 25), fg="#333",
                           bg="#fff")
    self.TextLabel.grid(column=0, row=2, padx=10, pady=10)
    self.TopFrame = Frame(self.master, bg="#42f498", width=1200, height=100)
    self.TopFrame.grid(column=0, row=1)

    def speed_button(label, color, row, command):
        button = Button(self.master, text=label, font=("Laksaman", 20),
                        width=400, height=80, bd=0, bg=color, fg="#333",
                        command=command)
        button.grid(column=0, row=row, padx=10, pady=10)
        return button

    self.Slow = speed_button("Slow mode", "#42f498", 3, self.small3)
    self.Medium = speed_button("Medium mode", "#FFD700", 4, self.mid3)
    self.Fast = speed_button("Fast mode", "#FF7F50", 5, self.big3)
    self.close3 = speed_button("Exit", "#4d79ff", 10, self.clos3)

    self.close.destroy()
def cub4(self):
    """Load the 4x4x4 solver, prepare it, and show its speed menu."""
    global cube4
    import cube4  # binds the module name at module scope (see `global` above)

    cube4.prepare()

    self.TextLabel = Label(self.master, text="Please select the speed of motors:", font=("Laksaman", 20), fg="#333",
                           bg="#fff")
    self.TextLabel.grid(column=0, row=2, padx=10, pady=10)
    self.TopFrame = Frame(self.master, bg="#42f498", width=1200, height=100)
    self.TopFrame.grid(column=0, row=1)

    def speed_button(label, color, row, command):
        button = Button(self.master, text=label, font=("Laksaman", 20),
                        width=400, height=80, bd=0, bg=color, fg="#333",
                        command=command)
        button.grid(column=0, row=row, padx=10, pady=10)
        return button

    self.Slow = speed_button("Slow mode", "#42f498", 3, self.small4)
    self.Medium = speed_button("Medium mode", "#FFD700", 4, self.mid4)
    self.Fast = speed_button("Fast mode", "#FF7F50", 5, self.big4)
    self.close4 = speed_button("Exit", "#4d79ff", 10, self.clos4)

    self.close.destroy()
def cub5(self):
    """Load the 5x5x5 solver, prepare it, and show its speed menu."""
    global cube5
    import cube5  # binds the module name at module scope (see `global` above)

    cube5.prepare()

    self.TextLabel = Label(self.master, text="Please select the speed of motors:", font=("Laksaman", 20), fg="#333",
                           bg="#fff")
    self.TextLabel.grid(column=0, row=2, padx=10, pady=10)
    self.TopFrame = Frame(self.master, bg="#42f498", width=1200, height=100)
    self.TopFrame.grid(column=0, row=1)

    def speed_button(label, color, row, command):
        button = Button(self.master, text=label, font=("Laksaman", 20),
                        width=400, height=80, bd=0, bg=color, fg="#333",
                        command=command)
        button.grid(column=0, row=row, padx=10, pady=10)
        return button

    self.Slow = speed_button("Slow mode", "#42f498", 3, self.small5)
    self.Medium = speed_button("Medium mode", "#FFD700", 4, self.mid5)
    self.Fast = speed_button("Fast mode", "#FF7F50", 5, self.big5)
    self.close5 = speed_button("Exit", "#4d79ff", 10, self.clos5)

    self.close.destroy()
def first3(self):
    """Scan, analyze, and solve the 3x3x3 cube, then display the solve time."""
    cube3.scanner()
    cube3.analyzer()
    elapsed = cube3.solver()  # total seconds reported by the solver

    if elapsed > 60:
        whole_minutes = math.floor(elapsed / 60)
        seconds = elapsed - whole_minutes * 60
    else:
        whole_minutes = 0
        seconds = elapsed

    unit = ' minute and ' if whole_minutes == 1 else ' minutes and '
    self.TextLabel.config(
        text="C U B E S O L V E D ! Total time: " + str(round(whole_minutes)) + unit + str(round(seconds, 2)) + ' seconds.')
def first4(self):
    """Scan, analyze, and solve the 4x4x4 cube, then display the solve time."""
    cube4.scanner()
    cube4.analyzer()
    elapsed = cube4.solver()  # total seconds reported by the solver

    if elapsed > 60:
        whole_minutes = math.floor(elapsed / 60)
        seconds = elapsed - whole_minutes * 60
    else:
        whole_minutes = 0
        seconds = elapsed

    unit = ' minute and ' if whole_minutes == 1 else ' minutes and '
    self.TextLabel.config(
        text="C U B E S O L V E D ! Total time: " + str(round(whole_minutes)) + unit + str(round(seconds, 2)) + ' seconds.')
def first5(self):
    """Scan, analyze, and solve the 5x5x5 cube, then display the solve time."""
    cube5.scanner()
    cube5.analyzer()
    elapsed = cube5.solver()  # total seconds reported by the solver

    if elapsed > 60:
        whole_minutes = math.floor(elapsed / 60)
        seconds = elapsed - whole_minutes * 60
    else:
        whole_minutes = 0
        seconds = elapsed

    unit = ' minute and ' if whole_minutes == 1 else ' minutes and '
    self.TextLabel.config(
        text="C U B E S O L V E D ! Total time: " + str(round(whole_minutes)) + unit + str(round(seconds, 2)) + ' seconds.')
def custom3(self):
    """Show the 3x3x3 pattern-selection menu."""
    self.TextLabel.config(text="Please select a pattern below:")
    self.TopFrame = Frame(self.master, bg="#EE82EE", width=1200, height=100)
    self.TopFrame.grid(column=0, row=1)

    def pattern_button(label, color, row, command):
        button = Button(self.master, text=label, font=("Laksaman", 20),
                        width=400, height=80, bd=0, bg=color, fg="#333",
                        command=command)
        button.grid(column=0, row=row, padx=10, pady=10)
        return button

    self.p2 = pattern_button("Checkboard", "#EE82EE", 3, self.check3)
    self.p4 = pattern_button("Six Spots", "#FF7F50", 4, self.spots3)
    self.p3 = pattern_button("Union Jack", "#FFD700", 5, self.union3)
    self.Back = pattern_button("Back", "#40E0D0", 9, self.back3)
    self.p1 = pattern_button("Six Crosses", "#42f498", 6, self.crosses3)

    self.pattern.destroy()
    self.solve.destroy()
def custom4(self):
self.TextLabel.config(text="Please select a pattern below:")
self.TopFrame = Frame(self.master, bg="#EE82EE", width=1200, height=100)
self.TopFrame.grid(column=0, row=1)
self.p2 = Button(self.master, text="Cube in cube",font=("Laksaman",20), width=400, height=80, bd=0, bg="#EE82EE", fg="#333",
command=self.check4)
self.p2.grid(column=0, row=3, padx=10, pady=10)
self.p4 = Button(self.master, text="Six Spots",font=("Laksaman",20), width=400, height=80, bd=0, bg="#FF7F50", fg="#333",
command=self.spots4)
self.p4.grid(column=0, row=4, padx=10, pady=10)
self.p3 = Button(self.master, text="Union Jack",font=("Laksaman",20), width=400, height=80, bd=0, bg="#FFD700", fg="#333",
command=self.union4)
self.p3.grid(column=0, row=5, padx=10, pady=10)
self.Back = Button(self.master, text="Back",font=("Laksaman",20), width=400, height=80, bd=0, bg="#40E0D0", fg="#333",
command=self.back4)
self.Back.grid(column=0, row=9, padx=10, pady=10)
self.p1 = Button(self.master, text="Six Crosses",font=("Laksaman",20), width=400, height=80, bd=0, bg="#42f498", fg="#333",
command=self.crosses4)
self.p1.grid(column=0, row=6, padx=10, pady=10)
self.pattern.destroy()
self.solve.destroy()
def custom5(self):
self.TextLabel.config(text="Please select a pattern below:")
self.TopFrame = Frame(self.master, bg="#EE82EE", width=1200, height=100)
self.TopFrame.grid(column=0, row=1)
self.TopFrame2 = Frame(self.master, bg="#EE82EE", width=600, height=60)
self.TopFrame2.grid(column=0, row=1)
self.p2 = Button(self.master, text="Checkboard",font=("Laksaman",20), width=400, height=80, bd=0, bg="#EE82EE", fg="#333",
command=self.check5)
self.p2.grid(column=0, row=3, padx=10, pady=10)
self.p4 = Button(self.master, text="Six Spots",font=("Laksaman",20), width=400, height=80, bd=0, bg="#FF7F50", fg="#333",
command=self.spots5)
self.p4.grid(column=0, row=4, padx=10, pady=10)
self.p3 = Button(self.master, text="Union Jack",font=("Laksaman",20), width=400, height=80, bd=0, bg="#FFD700", fg="#333",
command=self.union5)
self.p3.grid(column=0, row=5, padx=10, pady=10)
self.Back = Button(self.master, text="Back",font=("Laksaman",20), width=400, height=80, bd=0, bg="#40E0D0", fg="#333",
command=self.back5)
self.Back.grid(column=0, row=9, padx=10, pady=10)
self.p1 = Button(self.master, text="Six Crosses",font=("Laksaman",20), width=400, height=80, bd=0, bg="#42f498", fg="#333",
command=self.crosses5)
self.p1.grid(column=0, row=6, padx=10, pady=10)
self.pattern.destroy()
self.solve.destroy()
    def crosses3(self):
        # "Six Crosses" preset for the 3x3 cube (pattern4), then announce completion.
        cube3.pattern4()
        self.TextLabel.config(text="C U B E S O L V E D !")
    def check3(self):
        # "Checkboard" preset for the 3x3 cube (pattern1), then announce completion.
        cube3.pattern1()
        self.TextLabel.config(text="C U B E S O L V E D !")
    def spots3(self):
        # "Six Spots" preset for the 3x3 cube (pattern2), then announce completion.
        cube3.pattern2()
        self.TextLabel.config(text="C U B E S O L V E D !")
    def union3(self):
        # "Union Jack" preset for the 3x3 cube (pattern3), then announce completion.
        cube3.pattern3()
        self.TextLabel.config(text="C U B E S O L V E D !")
    def crosses4(self):
        # "Six Crosses" preset for the 4x4 cube (pattern4), then announce completion.
        cube4.pattern4()
        self.TextLabel.config(text="C U B E S O L V E D !")
    def check4(self):
        # "Cube in cube" preset for the 4x4 cube (pattern1), then announce completion.
        cube4.pattern1()
        self.TextLabel.config(text="C U B E S O L V E D !")
    def spots4(self):
        # "Six Spots" preset for the 4x4 cube (pattern2), then announce completion.
        cube4.pattern2()
        self.TextLabel.config(text="C U B E S O L V E D !")
    def union4(self):
        # "Union Jack" preset for the 4x4 cube (pattern3), then announce completion.
        cube4.pattern3()
        self.TextLabel.config(text="C U B E S O L V E D !")
    def crosses5(self):
        # "Six Crosses" preset for the 5x5 cube (pattern4), then announce completion.
        cube5.pattern4()
        self.TextLabel.config(text="C U B E S O L V E D !")
    def check5(self):
        # "Checkboard" preset for the 5x5 cube (pattern1), then announce completion.
        cube5.pattern1()
        self.TextLabel.config(text="C U B E S O L V E D !")
    def spots5(self):
        # "Six Spots" preset for the 5x5 cube (pattern2), then announce completion.
        cube5.pattern2()
        self.TextLabel.config(text="C U B E S O L V E D !")
    def union5(self):
        # "Union Jack" preset for the 5x5 cube (pattern3), then announce completion.
        cube5.pattern3()
        self.TextLabel.config(text="C U B E S O L V E D !")
def back3(self):
self.TextLabel.config(text="Please select the speed of motors:")
self.TopFrame = Frame(self.master, bg="#42f498", width=1200, height=100)
self.TopFrame.grid(column=0, row=1)
self.Slow = Button(self.master, text="Slow mode",font=("Laksaman",20), width=400, height=80, bd=0, bg="#42f498", fg="#333",
command=self.small3)
self.Slow.grid(column=0, row=3, padx=10, pady=10)
self.Medium = Button(self.master, text="Medium mode",font=("Laksaman",20), width=400, height=80, bd=0, bg="#FFD700", fg="#333",
command=self.mid3)
self.Medium.grid(column=0, row=4, padx=10, pady=10)
self.Fast = Button(self.master, text="Fast mode",font=("Laksaman",20), width=400, height=80, bd=0, bg="#FF7F50", fg="#333",
command=self.big3)
self.Fast.grid(column=0, row=5, padx=10, pady=10)
self.solve = Button(self.master, text="Solve",font=("Laksaman",20), width=400, height=80, bd=0, bg="#EE82EE", fg="#333",
command=self.first3)
self.solve.grid(column=0, row=7, padx=10, pady=10)
self.bck2 = Button(self.master, text="Back",font=("Laksaman",20), width=400, height=80, bd=0, bg="#40E0D0", fg="#333",
command=self.back23)
self.bck2.grid(column=0, row=9, padx=10, pady=10)
self.pattern = Button(self.master, text="Custom pattern",font=("Laksaman",20), width=400, height=80, bd=0, bg="#66b3ff", fg="#333",
command=self.custom3)
self.pattern.grid(column=0, row=8, padx=10, pady=10)
self.p1.destroy()
self.p2.destroy()
self.p3.destroy()
self.p4.destroy()
self.Back.destroy()
def back4(self):
self.TextLabel.config(text="Please select the speed of motors:")
self.TopFrame = Frame(self.master, bg="#42f498", width=400, height=60)
self.TopFrame.grid(column=1, row=1)
self.TopFrame2 = Frame(self.master, bg="#42f498", width=600, height=60)
self.TopFrame2.grid(column=0, row=1)
self.Slow = Button(self.master, text="Slow mode",font=("Laksaman",20), width=400, height=80, bd=0, bg="#42f498", fg="#333",
command=self.small4)
self.Slow.grid(column=0, row=3, padx=10, pady=10)
self.Medium = Button(self.master, text="Medium mode",font=("Laksaman",20), width=400, height=80, bd=0, bg="#FFD700", fg="#333",
command=self.mid4)
self.Medium.grid(column=0, row=4, padx=10, pady=10)
self.Fast = Button(self.master, text="Fast mode",font=("Laksaman",20), width=400, height=80, bd=0, bg="#FF7F50", fg="#333",
command=self.big4)
self.Fast.grid(column=0, row=5, padx=10, pady=10)
self.solve = Button(self.master, text="Solve",font=("Laksaman",20), width=400, height=80, bd=0, bg="#EE82EE", fg="#333",
command=self.first4)
self.solve.grid(column=0, row=7, padx=10, pady=10)
self.bck2 = Button(self.master, text="Back",font=("Laksaman",20), width=400, height=80, bd=0, bg="#40E0D0", fg="#333",
command=self.back24)
self.bck2.grid(column=0, row=9, padx=10, pady=10)
self.pattern = Button(self.master, text="Custom pattern",font=("Laksaman",20), width=400, height=80, bd=0, bg="#66b3ff", fg="#333",
| |
"default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-3_f4142",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-31-cap25": {
"ap_mac": "6c71.edff.1ff6",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-32-cap3": {
"ap_mac": "6c71.edff.1f0b",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-32-cap32": {
"ap_mac": "6c71.edff.1f1f",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-3_f4142",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-21-cap42": {
"ap_mac": "6c71.edff.1f3b",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-2_88bd2",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-31-cap34": {
"ap_mac": "6c71.edff.1f47",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-11-cap23": {
"ap_mac": "6c71.edff.1f57",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-1_87d8f",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-31-cap22": {
"ap_mac": "6c71.edff.1f5b",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-32-cap6": {
"ap_mac": "6c71.edff.1f5f",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-21-cap15": {
"ap_mac": "6c71.edff.1f63",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-31-cap32": {
"ap_mac": "6c71.edff.1f67",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-22-cap23": {
"ap_mac": "6c71.edff.1f6b",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-2_88bd2",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-12-cap31": {
"ap_mac": "6c71.edff.1f73",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-22-cap1": {
"ap_mac": "6c71.edff.1f77",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-2_88bd2",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-21-cap6": {
"ap_mac": "6c71.edff.1f7b",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-12-cap35": {
"ap_mac": "6c71.edff.1f7f",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-22-cap25": {
"ap_mac": "6c71.edff.1f83",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-32-cap9": {
"ap_mac": "6c71.edff.1f87",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-31-cap24": {
"ap_mac": "6c71.edff.1fa3",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-11-cap5": {
"ap_mac": "6c71.edff.1fab",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-21-cap39": {
"ap_mac": "6c71.edff.1fbf",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-2_88bd2",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-11-cap20": {
"ap_mac": "6c71.edff.1fc3",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-1_87d8f",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-12-cap17": {
"ap_mac": "6c71.edff.234f",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-1_87d8f",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-32-cap5": {
"ap_mac": "6c71.edff.2543",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-31-cap2": {
"ap_mac": "6c71.edff.259f",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-3_f4142",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-12-cap26": {
"ap_mac": "6c71.edff.3997",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-22-cap11": {
"ap_mac": "6c71.edff.39af",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-2_88bd2",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-21-cap14": {
"ap_mac": "6c71.edff.39c7",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-21-cap45": {
"ap_mac": "6c71.edff.39cf",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-2_88bd2",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-21-cap18": {
"ap_mac": "6c71.edff.3aee",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-11-cap25": {
"ap_mac": "6c71.edff.3af2",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-1_87d8f",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-11-cap19": {
"ap_mac": "6c71.edff.3a13",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-11-cap7": {
"ap_mac": "6c71.edff.3a1f",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-21-cap29": {
"ap_mac": "6c71.edff.3a27",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-2_88bd2",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-11-cap21": {
"ap_mac": "6c71.edff.3a2b",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-1_87d8f",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-31-cap9": {
"ap_mac": "6c71.edff.3a33",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-3_f4142",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-22-cap30": {
"ap_mac": "6c71.edff.3a4f",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-32-cap16": {
"ap_mac": "6c71.edff.3a7f",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-11-cap18": {
"ap_mac": "6c71.edff.3a87",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-22-cap3": {
"ap_mac": "6c71.edff.3a93",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-2_88bd2",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-12-cap25": {
"ap_mac": "6c71.edff.3bdf",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-31-cap20": {
"ap_mac": "6c71.edff.3c23",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-3_f4142",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-31-cap6": {
"ap_mac": "6c71.edff.3c2b",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-3_f4142",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-31-cap4": {
"ap_mac": "6c71.edff.3c37",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-3_f4142",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-12-cap14": {
"ap_mac": "6c71.edff.3c3b",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-1_87d8f",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-12-cap12": {
"ap_mac": "6c71.edff.3c43",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-1_87d8f",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-12-cap28": {
"ap_mac": "6c71.edff.3c47",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-32-cap11": {
"ap_mac": "6c71.edff.3c4b",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-32-cap8": {
"ap_mac": "6c71.edff.3c57",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-12-cap10": {
"ap_mac": "6c71.edff.3c5b",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-1_87d8f",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-12-cap27": {
"ap_mac": "6c71.edff.3c5f",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-12-cap5": {
"ap_mac": "6c71.edff.3c63",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-1_87d8f",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-12-cap9": {
"ap_mac": "6c71.edff.3c6f",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-1_87d8f",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-32-cap31": {
"ap_mac": "6c71.edff.3c7b",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-3_f4142",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-31-cap17": {
"ap_mac": "6c71.edff.3c7f",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-3_f4142",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-12-cap33": {
"ap_mac": "6c71.edff.3c8b",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-11-cap31": {
"ap_mac": "6c71.edff.3c97",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-1_87d8f",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-12-cap13": {
"ap_mac": "6c71.edff.3ddb",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-1_87d8f",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-11-cap16": {
"ap_mac": "6c71.edff.3e4b",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-32-cap18": {
"ap_mac": "6c71.edff.3ff2",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-31-cap21": {
"ap_mac": "6c71.edff.40c3",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-3_f4142",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-12-cap4": {
"ap_mac": "6c71.edff.41f6",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-1_87d8f",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-12-cap23": {
"ap_mac": "6c71.edff.4123",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-11-cap15": {
"ap_mac": "6c71.edff.412b",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-31-cap31": {
"ap_mac": "6c71.edff.4133",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-21-cap8": {
"ap_mac": "6c71.edff.433b",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-12-cap22": {
"ap_mac": "6c71.edff.434b",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-22-cap22": {
"ap_mac": "6c71.edff.435f",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-31-cap33": {
"ap_mac": "6c71.edff.436b",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-21-cap20": {
"ap_mac": "6c71.edff.4377",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-21-cap41": {
"ap_mac": "6c71.edff.437b",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-2_88bd2",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-12-cap20": {
"ap_mac": "6c71.edff.4383",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-11-cap10": {
"ap_mac": "6c71.edff.43b7",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-21-cap2": {
"ap_mac": "6c71.edff.43bf",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-31-cap7": {
"ap_mac": "6c71.edff.43cb",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-3_f4142",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-11-cap14": {
"ap_mac": "6c71.edff.43db",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-11-cap13": {
"ap_mac": "6c71.edff.43df",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-21-cap13": {
"ap_mac": "6c71.0fff.de61",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-22-cap21": {
"ap_mac": "6c71.0fff.de75",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-2_88bd2",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-32-cap27": {
"ap_mac": "6c71.0fff.de99",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-3_f4142",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-21-cap46": {
"ap_mac": "6c71.0fff.deb5",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-2_88bd2",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-21-cap37": {
"ap_mac": "6c71.0fff.e53d",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-2_88bd2",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-22-cap19": {
"ap_mac": "6c71.0fff.e571",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-2_88bd2",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-12-cap15": {
"ap_mac": "6c71.0fff.edbd",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-1_87d8f",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-22-cap14": {
"ap_mac": "6c71.0fff.edd1",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-2_88bd2",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-22-cap4": {
"ap_mac": "6c71.0fff.eef8",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-2_88bd2",
"rf_tag_name": "Standard",
| |
which to apply --aggregation, in nDnHnMnS shorthand or full ISO8601 format.
examples:
- name: Update a classic metric-based alert rule. (autogenerated)
text: |
az monitor alert update --email-service-owners true --name MyAlertRule --resource-group MyResourceGroup
crafted: true
- name: Update a classic metric-based alert rule. (autogenerated)
text: |
az monitor alert update --name MyAlertRule --remove-action email <EMAIL> --resource-group MyResourceGroup
crafted: true
- name: Update a classic metric-based alert rule. (autogenerated)
text: |
az monitor alert update --name MyAlertRule --resource-group MyResourceGroup --set retentionPolicy.days=365
crafted: true
"""
helps['monitor autoscale'] = """
type: group
short-summary: Manage autoscale settings.
long-summary: >
For more information on autoscaling, visit: https://docs.microsoft.com/azure/monitoring-and-diagnostics/monitoring-understanding-autoscale-settings
"""
helps['monitor autoscale create'] = """
type: command
short-summary: Create new autoscale settings.
long-summary: >
For more information on autoscaling, visit: https://docs.microsoft.com/azure/monitoring-and-diagnostics/monitoring-understanding-autoscale-settings
parameters:
- name: --action -a
short-summary: Add an action to fire when a scaling event occurs.
long-summary: |
Usage: --action TYPE KEY [ARG ...]
Email: --action email <EMAIL> <EMAIL>
Webhook: --action webhook https://www.contoso.com/alert apiKey=value
Webhook: --action webhook https://www.contoso.com/alert?apiKey=value
Multiple actions can be specified by using more than one `--action` argument.
examples:
- name: Create autoscale settings to scale between 2 and 5 instances (3 as default). Email the administrator when scaling occurs.
text: |
az monitor autoscale create -g {myrg} --resource {resource-id} --min-count 2 --max-count 5 \\
--count 3 --email-administrator
az monitor autoscale rule create -g {myrg} --autoscale-name {resource-name} --scale out 1 \\
--condition "Percentage CPU > 75 avg 5m"
az monitor autoscale rule create -g {myrg} --autoscale-name {resource-name} --scale in 1 \\
--condition "Percentage CPU < 25 avg 5m"
- name: Create autoscale settings for exactly 4 instances.
text: >
az monitor autoscale create -g {myrg} --resource {resource-id} --count 4
- name: Create new autoscale settings. (autogenerated)
text: |
az monitor autoscale create --count 3 --max-count 5 --min-count 2 --name MyAutoscaleSettings --resource myScaleSet --resource-group MyResourceGroup --resource-type Microsoft.Compute/virtualMachineScaleSets
crafted: true
"""
helps['monitor autoscale profile'] = """
type: group
short-summary: Manage autoscaling profiles.
long-summary: >
For more information on autoscaling, visit: https://docs.microsoft.com/azure/monitoring-and-diagnostics/monitoring-understanding-autoscale-settings
"""
helps['monitor autoscale profile create'] = """
type: command
short-summary: Create a fixed or recurring autoscale profile.
long-summary: >
For more information on autoscaling, visit: https://docs.microsoft.com/azure/monitoring-and-diagnostics/monitoring-understanding-autoscale-settings
parameters:
- name: --timezone
short-summary: Timezone name.
populator-commands:
- az monitor autoscale profile list-timezones
- name: --recurrence -r
short-summary: When the profile recurs. If omitted, a fixed (non-recurring) profile is created.
long-summary: |
Usage: --recurrence {week} [ARG ARG ...]
Weekly: --recurrence week Sat Sun
- name: --start
short-summary: When the autoscale profile begins. Format depends on the type of profile.
long-summary: |
Fixed: --start yyyy-mm-dd [hh:mm:ss]
Weekly: [--start hh:mm]
- name: --end
short-summary: When the autoscale profile ends. Format depends on the type of profile.
long-summary: |
Fixed: --end yyyy-mm-dd [hh:mm:ss]
Weekly: [--end hh:mm]
examples:
- name: Create a fixed date profile, inheriting the default scaling rules but changing the capacity.
text: |
az monitor autoscale create -g {myrg} --resource {resource-id} --min-count 2 --count 3 \\
--max-count 5
az monitor autoscale rule create -g {myrg} --autoscale-name {name} --scale out 1 \\
--condition "Percentage CPU > 75 avg 5m"
az monitor autoscale rule create -g {myrg} --autoscale-name {name} --scale in 1 \\
--condition "Percentage CPU < 25 avg 5m"
az monitor autoscale profile create -g {myrg} --autoscale-name {name} -n Christmas \\
--copy-rules default --min-count 3 --count 6 --max-count 10 --start 2018-12-24 \\
--end 2018-12-26 --timezone "Pacific Standard Time"
- name: Create a recurring weekend profile, inheriting the default scaling rules but changing the capacity.
text: |
az monitor autoscale create -g {myrg} --resource {resource-id} --min-count 2 --count 3 \\
--max-count 5
az monitor autoscale rule create -g {myrg} --autoscale-name {name} --scale out 1 \\
--condition "Percentage CPU > 75 avg 5m"
az monitor autoscale rule create -g {myrg} --autoscale-name {name} --scale in 1 \\
--condition "Percentage CPU < 25 avg 5m"
          az monitor autoscale profile create -g {myrg} --autoscale-name {name} -n weekend \\
--copy-rules default --min-count 1 --count 2 --max-count 2 \\
--recurrence week sat sun --timezone "Pacific Standard Time"
- name: Create a fixed or recurring autoscale profile. (autogenerated)
text: |
az monitor autoscale profile create --autoscale-name MyAutoscale --copy-rules default --count 2 --end 2018-12-26 --max-count 10 --min-count 1 --name Christmas --recurrence week sat sun --resource-group MyResourceGroup --start 2018-12-24 --timezone "Pacific Standard Time"
crafted: true
- name: Create a fixed or recurring autoscale profile. (autogenerated)
text: |
az monitor autoscale profile create --autoscale-name MyAutoscale --count 2 --max-count 10 --min-count 1 --name Christmas --recurrence week sat sun --resource-group MyResourceGroup --start 2018-12-24 --subscription MySubscription --timezone "Pacific Standard Time"
crafted: true
"""
helps['monitor autoscale profile delete'] = """
type: command
short-summary: Delete an autoscale profile.
examples:
- name: Delete an autoscale profile. (autogenerated)
text: |
az monitor autoscale profile delete --autoscale-name MyAutoscale --name MyAutoscaleProfile --resource-group MyResourceGroup
crafted: true
"""
helps['monitor autoscale profile list'] = """
type: command
short-summary: List autoscale profiles.
examples:
- name: List autoscale profiles. (autogenerated)
text: |
az monitor autoscale profile list --autoscale-name MyAutoscale --resource-group MyResourceGroup
crafted: true
"""
helps['monitor autoscale profile list-timezones'] = """
type: command
short-summary: Look up time zone information.
"""
helps['monitor autoscale profile show'] = """
type: command
short-summary: Show details of an autoscale profile.
"""
helps['monitor autoscale rule'] = """
type: group
short-summary: Manage autoscale scaling rules.
long-summary: >
For more information on autoscaling, visit: https://docs.microsoft.com/azure/monitoring-and-diagnostics/monitoring-understanding-autoscale-settings
"""
helps['monitor autoscale rule copy'] = """
type: command
short-summary: Copy autoscale rules from one profile to another.
"""
helps['monitor autoscale rule create'] = """
type: command
short-summary: Add a new autoscale rule.
long-summary: >
For more information on autoscaling, visit: https://docs.microsoft.com/azure/monitoring-and-diagnostics/monitoring-understanding-autoscale-settings
parameters:
- name: --condition
short-summary: The condition which triggers the scaling action.
long-summary: >
Usage: --condition ["NAMESPACE"] METRIC {==,!=,>,>=,<,<=} THRESHOLD
{avg,min,max,total,count} PERIOD
[where DIMENSION {==,!=} VALUE [or VALUE ...]
[and DIMENSION {==,!=} VALUE [or VALUE ...] ...]]
Dimensions can be queried by adding the 'where' keyword and multiple dimensions can be queried by combining them with the 'and' keyword.
Values for METRIC and appropriate THRESHOLD values can be obtained from the `az monitor metric` command.
Format of PERIOD is "##h##m##s".
- name: --scale
short-summary: The direction and amount to scale.
long-summary: |
Usage: --scale {to,in,out} VAL[%]
Fixed Count: --scale to 5
In by Count: --scale in 2
Out by Percent: --scale out 10%
- name: --timegrain
short-summary: >
The way metrics are polled across instances.
long-summary: >
The form of the timegrain is {avg,min,max,sum} VALUE. Values can be obtained from the `az monitor metric` command.
Format of VALUE is "##h##m##s".
examples:
- name: Scale to 5 instances when the CPU Percentage across instances is greater than 75 averaged over 10 minutes.
text: |
az monitor autoscale rule create -g {myrg} --autoscale-name {myvmss} \\
--scale to 5 --condition "Percentage CPU > 75 avg 10m"
- name: Scale up 2 instances when the CPU Percentage across instances is greater than 75 averaged over 5 minutes.
text: |
az monitor autoscale rule create -g {myrg} --autoscale-name {myvmss} \\
--scale out 2 --condition "Percentage CPU > 75 avg 5m"
- name: Scale down 50% when the CPU Percentage across instances is less than 25 averaged over 15 minutes.
text: |
az monitor autoscale rule create -g {myrg} --autoscale-name {myvmss} \\
--scale in 50% --condition "Percentage CPU < 25 avg 15m"
- name: Create autoscale settings via a guest vm metric enabled from diagnostic extensions.
You can use counterSpecifier field retrieved from 'az vmss diagnostics get-default-config' in the `--condition`.
text: |
az monitor autoscale rule create -g {myrg} --autoscale-name test --scale out 1 --condition "/builtin/memory/percentavailablememory > 80 total 5m"
"""
helps['monitor autoscale rule delete'] = """
type: command
short-summary: Remove autoscale rules from a profile.
"""
helps['monitor autoscale rule list'] = """
type: command
short-summary: List autoscale rules for a profile.
examples:
- name: List autoscale rules for a profile. (autogenerated)
text: |
az monitor autoscale rule list --autoscale-name MyAutoscale --profile-name MyProfile --resource-group MyResourceGroup
crafted: true
"""
helps['monitor autoscale show'] = """
type: command
short-summary: Show autoscale setting details.
examples:
- name: Show autoscale setting details. (autogenerated)
text: |
az monitor autoscale show --name MyAutoscaleSettings --resource-group MyResourceGroup
crafted: true
"""
helps['monitor autoscale update'] = """
type: command
short-summary: Update autoscale settings.
long-summary: >
For more information on autoscaling, visit: https://docs.microsoft.com/azure/monitoring-and-diagnostics/monitoring-understanding-autoscale-settings
parameters:
- name: --add-action -a
short-summary: Add an action to fire when a scaling event occurs.
long-summary: |
Usage: --add-action TYPE | |
<reponame>bjdarrer/tf2-model-g
import warnings
import tensorflow as tf
import numpy as np
import util
import io # BJD added 18.11.2020
from pde_solver import PDESolverDx
from integrators.model_g import polynomial_order_4_centered as reaction_integrator
from integrators.model_g import steady_state
#from render_video import c1
#c1 = 0
# Default parameter set for FluidModelG.
# A, B, k2, k-2, k5 are fed to steady_state() and the reaction integrator;
# D_G/D_X/D_Y scale the spectral diffusion decay; density_* and base-density
# are combined into the total (log) density in step(); viscosity and
# speed-of-sound parametrize the flow integrator. Units are model units.
DEFAULT_PARAMS = {
    "A": 3.42,
    "B": 14.5,
    "k2": 1.0,
    "k-2": 0.1,
    "k5": 0.9,
    "D_G": 1.0,
    "D_X": 1.0,
    "D_Y": 2.0,
    "density_G": 2.0,
    "density_X": 1.0,
    "density_Y": 1.5,
    "base-density": 6.0,
    "viscosity": 0.1,
    "speed-of-sound": 0.2,
}
class FluidModelG(PDESolverDx):
"""
Model G on a fluid medium
"""
    def __init__(self, concentration_G, concentration_X, concentration_Y, u, dx, dt=None, params=None, source_functions=None):
        """Build the spectral integrators for Model G coupled to a compressible flow.

        Parameters:
            concentration_G, concentration_X, concentration_Y: initial
                concentration fields; all share the same grid shape.
            u: flow velocity components, one array per spatial dimension
                (only 2D and 3D are supported; 1D raises ValueError).
            dx: grid constant.
            dt: time step; defaults to 0.1 * dx. Values above 0.5 * dx only
                trigger a stability warning, they are not rejected.
            params: model parameter dict, defaults to DEFAULT_PARAMS.
            source_functions: optional mapping of extra source terms.
        """
        if dt is None:
            dt = 0.1 * dx
        if dt > 0.5 * dx:
            warnings.warn("Time increment {} too large for simulation stability with grid constant {}".format(dt, dx))
        super().__init__(dx, dt, concentration_G.shape)
        self.params = params or DEFAULT_PARAMS
        self.source_functions = source_functions or {}
        self.G = tf.constant(concentration_G, 'float64')
        self.X = tf.constant(concentration_X, 'float64')
        self.Y = tf.constant(concentration_Y, 'float64')
        # Homogeneous steady state of the reaction system.
        # NOTE(review): these offsets are only used by the 3D advection
        # integrator below; the 2D branch ignores them (see note there).
        G0, X0, Y0 = steady_state(self.params['A'], self.params['B'], self.params['k2'], self.params['k-2'], self.params['k5'])
        if len(u) != self.dims:
            raise ValueError("{0}-dimensional flow must have {0} components".format(self.dims))
        # Squared speed of sound: prefactor of the pressure-wave terms below.
        c2 = self.params["speed-of-sound"]**2
        viscosity = self.params["viscosity"]
        if self.dims == 1:
            raise ValueError("1D not supported")
        elif self.dims == 2:
            self.u = tf.constant(u[0], 'float64')
            self.v = tf.constant(u[1], 'float64')
            # Spectral grid frequencies. Linear damping (viscosity, diffusion)
            # is applied exactly in Fourier space via the decay factors below.
            omega_x, omega_y = self.omega_x, self.omega_y
            omega2 = omega_x**2 + omega_y**2
            # The 1/3 cross terms presumably model the compressible part of
            # the viscous stress (cf. the -2/3*divergence strain terms in
            # flow_integrator) -- TODO confirm derivation.
            omega2_x = tf.constant(omega2 + 1/3 * omega_x * (omega_x + omega_y), "complex128")
            omega2_y = tf.constant(omega2 + 1/3 * omega_y * (omega_x + omega_y), "complex128")
            decay_x = tf.exp(-viscosity * omega2_x * self.dt)
            decay_y = tf.exp(-viscosity * omega2_y * self.dt)
            # Exact per-step diffusion decay for each species.
            delta = tf.constant(-omega2 * self.dt, "complex128")
            decay_G = tf.exp(self.params['D_G'] * delta)
            decay_X = tf.exp(self.params['D_X'] * delta)
            decay_Y = tf.exp(self.params['D_Y'] * delta)
            def flow_integrator(rho, u, v):
                """
                Flow is integrated with respect to the total log density (rho)
                """
                # Enter Fourier Domain
                f_rho = self.fft(tf.cast(rho, 'complex128'))
                waves_x = self.fft(tf.cast(u, 'complex128'))
                waves_y = self.fft(tf.cast(v, 'complex128'))
                # Viscosity and internal shear
                waves_x *= decay_x
                waves_y *= decay_y
                # Exit Fourier Domain
                u = tf.cast(self.ifft(waves_x), 'float64')
                v = tf.cast(self.ifft(waves_y), 'float64')
                # Calculate gradients (spectral differentiation)
                rho_dx = tf.cast(self.ifft(f_rho * self.kernel_dx), 'float64')
                rho_dy = tf.cast(self.ifft(f_rho * self.kernel_dy), 'float64')
                u_dx = tf.cast(self.ifft(waves_x * self.kernel_dx), 'float64')
                u_dy = tf.cast(self.ifft(waves_x * self.kernel_dy), 'float64')
                v_dx = tf.cast(self.ifft(waves_y * self.kernel_dx), 'float64')
                v_dy = tf.cast(self.ifft(waves_y * self.kernel_dy), 'float64')
                divergence = u_dx + v_dy
                # This would handle log density continuity but it's actually handled individually for G, X and Y
                # rho -= (u*rho_dx + v*rho_dy + divergence) * self.dt
                # Self-advect flow
                du = -u*u_dx - v*u_dy
                dv = -u*v_dx - v*v_dy
                # Propagate pressure waves
                du -= c2 * rho_dx
                dv -= c2 * rho_dy
                # Apply strain
                du += viscosity * (rho_dx * (u_dx + u_dx) + rho_dy * (u_dy + v_dx) - 2/3*rho_dx * divergence)
                dv += viscosity * (rho_dx * (v_dx + u_dy) + rho_dy * (v_dy + v_dy) - 2/3*rho_dy * divergence)
                # Explicit Euler update of the velocity field.
                u += du * self.dt
                v += dv * self.dt
                return u, v, divergence
            def diffusion_advection_integrator(G, X, Y, u, v, divergence):
                """Diffuse (spectrally) then advect/dilute the three species."""
                # Enter Fourier domain and apply exact diffusion decay.
                f_G = self.fft(tf.cast(G, 'complex128'))
                f_X = self.fft(tf.cast(X, 'complex128'))
                f_Y = self.fft(tf.cast(Y, 'complex128'))
                f_G *= decay_G
                f_X *= decay_X
                f_Y *= decay_Y
                G = tf.cast(self.ifft(f_G), 'float64')
                X = tf.cast(self.ifft(f_X), 'float64')
                Y = tf.cast(self.ifft(f_Y), 'float64')
                G_dx = tf.cast(self.ifft(f_G * self.kernel_dx), 'float64')
                G_dy = tf.cast(self.ifft(f_G * self.kernel_dy), 'float64')
                X_dx = tf.cast(self.ifft(f_X * self.kernel_dx), 'float64')
                X_dy = tf.cast(self.ifft(f_X * self.kernel_dy), 'float64')
                Y_dx = tf.cast(self.ifft(f_Y * self.kernel_dx), 'float64')
                Y_dy = tf.cast(self.ifft(f_Y * self.kernel_dy), 'float64')
                # NOTE(review): unlike the 3D integrator below, the dilution
                # terms here use G*divergence rather than (G+G0)*divergence.
                # Confirm whether omitting the steady-state offsets in 2D is
                # intentional.
                G -= (u*G_dx + v*G_dy + G*divergence) * self.dt
                X -= (u*X_dx + v*X_dy + X*divergence) * self.dt
                Y -= (u*Y_dx + v*Y_dy + Y*divergence) * self.dt
                return G, X, Y
        elif self.dims == 3:
            self.u = tf.constant(u[0], 'float64')
            self.v = tf.constant(u[1], 'float64')
            self.w = tf.constant(u[2], 'float64')
            omega_x, omega_y, omega_z = self.omega_x, self.omega_y, self.omega_z
            omega2 = omega_x**2 + omega_y**2 + omega_z**2
            omega2_x = tf.constant(omega2 + 1/3 * omega_x * (omega_x + omega_y + omega_z), "complex128")
            omega2_y = tf.constant(omega2 + 1/3 * omega_y * (omega_x + omega_y + omega_z), "complex128")
            omega2_z = tf.constant(omega2 + 1/3 * omega_z * (omega_x + omega_y + omega_z), "complex128")
            decay_x = tf.exp(-viscosity * omega2_x * self.dt)
            decay_y = tf.exp(-viscosity * omega2_y * self.dt)
            decay_z = tf.exp(-viscosity * omega2_z * self.dt)
            delta = tf.constant(-omega2 * self.dt, "complex128")
            decay_G = tf.exp(self.params['D_G'] * delta)
            decay_X = tf.exp(self.params['D_X'] * delta)
            decay_Y = tf.exp(self.params['D_Y'] * delta)
            def flow_integrator(rho, u, v, w):
                """3D analogue of the 2D flow integrator above."""
                # Enter Fourier Domain
                f_rho = self.fft(tf.cast(rho, 'complex128'))
                waves_x = self.fft(tf.cast(u, 'complex128'))
                waves_y = self.fft(tf.cast(v, 'complex128'))
                waves_z = self.fft(tf.cast(w, 'complex128'))
                # Viscosity and internal shear
                waves_x *= decay_x
                waves_y *= decay_y
                waves_z *= decay_z
                # Exit Fourier Domain
                u = tf.cast(self.ifft(waves_x), 'float64')
                v = tf.cast(self.ifft(waves_y), 'float64')
                w = tf.cast(self.ifft(waves_z), 'float64')
                # Calculate gradients
                rho_dx = tf.cast(self.ifft(f_rho * self.kernel_dx), 'float64')
                rho_dy = tf.cast(self.ifft(f_rho * self.kernel_dy), 'float64')
                rho_dz = tf.cast(self.ifft(f_rho * self.kernel_dz), 'float64')
                u_dx = tf.cast(self.ifft(waves_x * self.kernel_dx), 'float64')
                u_dy = tf.cast(self.ifft(waves_x * self.kernel_dy), 'float64')
                u_dz = tf.cast(self.ifft(waves_x * self.kernel_dz), 'float64')
                v_dx = tf.cast(self.ifft(waves_y * self.kernel_dx), 'float64')
                v_dy = tf.cast(self.ifft(waves_y * self.kernel_dy), 'float64')
                v_dz = tf.cast(self.ifft(waves_y * self.kernel_dz), 'float64')
                w_dx = tf.cast(self.ifft(waves_z * self.kernel_dx), 'float64')
                w_dy = tf.cast(self.ifft(waves_z * self.kernel_dy), 'float64')
                w_dz = tf.cast(self.ifft(waves_z * self.kernel_dz), 'float64')
                divergence = u_dx + v_dy + w_dz
                # This would handle log density continuity, but we do G, X and Y individually
                # rho -= (u*rho_dx + v*rho_dy + w*rho_dz + divergence) * self.dt
                # Self-advect flow
                du = -u*u_dx - v*u_dy - w*u_dz
                dv = -u*v_dx - v*v_dy - w*v_dz
                dw = -u*w_dx - v*w_dy - w*w_dz
                # Propagate pressure waves
                du -= c2 * rho_dx
                dv -= c2 * rho_dy
                dw -= c2 * rho_dz
                # Apply strain
                du += viscosity * (rho_dx * (u_dx + u_dx) + rho_dy * (u_dy + v_dx) + rho_dz * (u_dz + w_dx) - 2/3*rho_dx * divergence)
                dv += viscosity * (rho_dx * (v_dx + u_dy) + rho_dy * (v_dy + v_dy) + rho_dz * (v_dz + w_dy) - 2/3*rho_dy * divergence)
                dw += viscosity * (rho_dx * (w_dx + u_dz) + rho_dy * (w_dy + v_dz) + rho_dz * (w_dz + w_dz) - 2/3*rho_dz * divergence)
                u += du * self.dt
                v += dv * self.dt
                w += dw * self.dt
                return u, v, w, divergence
            def diffusion_advection_integrator(G, X, Y, u, v, w, divergence):
                """Diffuse (spectrally) then advect/dilute the three species (3D)."""
                f_G = self.fft(tf.cast(G, 'complex128'))
                f_X = self.fft(tf.cast(X, 'complex128'))
                f_Y = self.fft(tf.cast(Y, 'complex128'))
                f_G *= decay_G
                f_X *= decay_X
                f_Y *= decay_Y
                G = tf.cast(self.ifft(f_G), 'float64')
                X = tf.cast(self.ifft(f_X), 'float64')
                Y = tf.cast(self.ifft(f_Y), 'float64')
                G_dx = tf.cast(self.ifft(f_G * self.kernel_dx), 'float64')
                G_dy = tf.cast(self.ifft(f_G * self.kernel_dy), 'float64')
                G_dz = tf.cast(self.ifft(f_G * self.kernel_dz), 'float64')
                X_dx = tf.cast(self.ifft(f_X * self.kernel_dx), 'float64')
                X_dy = tf.cast(self.ifft(f_X * self.kernel_dy), 'float64')
                X_dz = tf.cast(self.ifft(f_X * self.kernel_dz), 'float64')
                Y_dx = tf.cast(self.ifft(f_Y * self.kernel_dx), 'float64')
                Y_dy = tf.cast(self.ifft(f_Y * self.kernel_dy), 'float64')
                Y_dz = tf.cast(self.ifft(f_Y * self.kernel_dz), 'float64')
                # Dilution uses the concentration offset by its steady state.
                G -= (u*G_dx + v*G_dy + w*G_dz + (G+G0)*divergence) * self.dt
                X -= (u*X_dx + v*X_dy + w*X_dz + (X+X0)*divergence) * self.dt
                Y -= (u*Y_dx + v*Y_dy + w*Y_dz + (Y+Y0)*divergence) * self.dt
                return G, X, Y
        else:
            raise ValueError('Only up to 3D supported')
        # Bind model parameters into the reaction step once, then compile all
        # three integrators to TF graphs.
        reaction_integrator_curried = lambda con_G, con_X, con_Y: reaction_integrator(
            con_G, con_X, con_Y,
            self.dt, self.params['A'], self.params['B'], self.params['k2'], self.params['k-2'], self.params['k5']
        )
        self.reaction_integrator = tf.function(reaction_integrator_curried)
        self.flow_integrator = tf.function(flow_integrator)
        self.diffusion_advection_integrator = tf.function(diffusion_advection_integrator)
def step(self):
self.G, self.X, self.Y = self.reaction_integrator(self.G, self.X, self.Y)
density_of_reactants = (
self.params['density_G'] * self.G +
self.params['density_X'] * self.X +
self.params['density_Y'] * self.Y
)
rho = tf.math.log(self.params['base-density'] + density_of_reactants)
if self.dims == 2:
#c1 = c1 + 1 # BJD added 18.11.2020
u, v = self.u, self.v # Store unintegrated flow so that we're on the same timestep
self.u, self.v, divergence = self.flow_integrator(rho, self.u, self.v)
self.G, self.X, self.Y = self.diffusion_advection_integrator(self.G, self.X, self.Y, u, v, divergence)
print("Value of X: ", self.X) # ***** BJD inserted this line 13.11.2020 *****
# ======================= BJD 18.11.2020 ================================================
#data = [['nameservers','panel'], ['nameservers','panel']]
#with open("output_bjd_Xons_1.txt", "w") as txt_file: # BJD 18.11.2020
# for line in self.X:
# txt_file.write(" ".join(line) + "\n") # works with any number of elements in a line
#c1 = c1 + 1
| |
12
comma = 13
hyphen = 14
period = 15
slash = 16
zero = 17
one = 18
two = 19
three = 20
four = 21
five = 22
six = 23
seven = 24
eight = 25
nine = 26
colon = 27
semicolon = 28
less = 29
equal = 30
greater = 31
question = 32
at = 33
A = 34
B = 35
C = 36
D = 37
E = 38
F = 39
G = 40
H = 41
I = 42
J = 43
K = 44
L = 45
M = 46
N = 47
O = 48
P = 49
Q = 50
R = 51
S = 52
T = 53
U = 54
V = 55
W = 56
X = 57
Y = 58
Z = 59
bracketleft = 60
backslash = 61
bracketright = 62
asciicircum = 63
underscore = 64
quoteleft = 65
a = 66
b = 67
c = 68
d = 69
e = 70
f = 71
g = 72
h = 73
i = 74
j = 75
k = 76
l = 77
m = 78
n = 79
o = 80
p = 81
q = 82
r = 83
s = 84
t = 85
u = 86
v = 87
w = 88
x = 89
y = 90
z = 91
braceleft = 92
bar = 93
braceright = 94
asciitilde = 95
exclamdown = 96
cent = 97
sterling = 98
fraction = 99
yen = 100
florin = 101
section = 102
currency = 103
quotesingle = 104
quotedblleft = 105
guillemotleft = 106
guilsinglleft = 107
guilsinglright = 108
fi = 109
fl = 110
endash = 111
dagger = 112
daggerdbl = 113
periodcentered = 114
paragraph = 115
bullet = 116
quotesinglbase = 117
quotedblbase = 118
quotedblright = 119
guillemotright = 120
ellipsis = 121
perthousand = 122
questiondown = 123
grave = 124
acute = 125
circumflex = 126
tilde = 127
macron = 128
breve = 129
dotaccent = 130
dieresis = 131
ring = 132
cedilla = 133
hungarumlaut = 134
ogonek = 135
caron = 136
emdash = 137
AE = 138
ordfeminine = 139
Lslash = 140
Oslash = 141
OE = 142
ordmasculine = 143
ae = 144
dotlessi = 145
lslash = 146
oslash = 147
oe = 148
germandbls = 149
onesuperior = 150
logicalnot = 151
mu = 152
trademark = 153
Eth = 154
onehalf = 155
plusminus = 156
Thorn = 157
onequarter = 158
divide = 159
brokenbar = 160
degree = 161
thorn = 162
threequarters = 163
twosuperior = 164
registered = 165
minus = 166
eth = 167
multiply = 168
threesuperior = 169
copyright = 170
Aacute = 171
Acircumflex = 172
Adieresis = 173
Agrave = 174
Aring = 175
Atilde = 176
Ccedilla = 177
Eacute = 178
Ecircumflex = 179
Edieresis = 180
Egrave = 181
Iacute = 182
Icircumflex = 183
Idieresis = 184
Igrave = 185
Ntilde = 186
Oacute = 187
Ocircumflex = 188
Odieresis = 189
Ograve = 190
Otilde = 191
Scaron = 192
Uacute = 193
Ucircumflex = 194
Udieresis = 195
Ugrave = 196
Yacute = 197
Ydieresis = 198
Zcaron = 199
aacute = 200
acircumflex = 201
adieresis = 202
agrave = 203
aring = 204
atilde = 205
ccedilla = 206
eacute = 207
ecircumflex = 208
edieresis = 209
egrave = 210
iacute = 211
icircumflex = 212
idieresis = 213
igrave = 214
ntilde = 215
oacute = 216
ocircumflex = 217
odieresis = 218
ograve = 219
otilde = 220
scaron = 221
uacute = 222
ucircumflex = 223
udieresis = 224
ugrave = 225
yacute = 226
ydieresis = 227
zcaron = 228
exclamsmall = 229
Hungarumlautsmall = 230
dollaroldstyle = 231
dollarsuperior = 232
ampersandsmall = 233
Acutesmall = 234
parenleftsuperior = 235
parenrightsuperior = 236
twodotenleader = 237
onedotenleader = 238
zerooldstyle = 239
oneoldstyle = 240
twooldstyle = 241
threeoldstyle = 242
fouroldstyle = 243
fiveoldstyle = 244
sixoldstyle = 245
sevenoldstyle = 246
eightoldstyle = 247
nineoldstyle = 248
commasuperior = 249
threequartersemdash = 250
periodsuperior = 251
questionsmall = 252
asuperior = 253
bsuperior = 254
centsuperior = 255
dsuperior = 256
esuperior = 257
isuperior = 258
lsuperior = 259
msuperior = 260
nsuperior = 261
osuperior = 262
rsuperior = 263
ssuperior = 264
tsuperior = 265
ff = 266
ffi = 267
ffl = 268
parenleftinferior = 269
parenrightinferior = 270
Circumflexsmall = 271
hyphensuperior = 272
Gravesmall = 273
Asmall = 274
Bsmall = 275
Csmall = 276
Dsmall = 277
Esmall = 278
Fsmall = 279
Gsmall = 280
Hsmall = 281
Ismall = 282
Jsmall = 283
Ksmall = 284
Lsmall = 285
Msmall = 286
Nsmall = 287
Osmall = 288
Psmall = 289
Qsmall = 290
Rsmall = 291
Ssmall = 292
Tsmall = 293
Usmall = 294
Vsmall = 295
Wsmall = 296
Xsmall = 297
Ysmall = 298
Zsmall = 299
colonmonetary = 300
onefitted = 301
rupiah = 302
Tildesmall = 303
exclamdownsmall = 304
centoldstyle = 305
Lslashsmall = 306
Scaronsmall = 307
Zcaronsmall = 308
Dieresissmall = 309
Brevesmall = 310
Caronsmall = 311
Dotaccentsmall = 312
Macronsmall = 313
figuredash = 314
hypheninferior = 315
Ogoneksmall = 316
Ringsmall = 317
Cedillasmall = 318
questiondownsmall = 319
oneeighth = 320
threeeighths = 321
fiveeighths = 322
seveneighths = 323
onethird = 324
twothirds = 325
zerosuperior = 326
foursuperior = 327
fivesuperior = 328
sixsuperior = 329
sevensuperior = 330
eightsuperior = 331
ninesuperior = 332
zeroinferior = 333
oneinferior = 334
twoinferior = 335
threeinferior = 336
fourinferior = 337
fiveinferior = 338
sixinferior = 339
seveninferior = 340
eightinferior = 341
nineinferior = 342
centinferior = 343
dollarinferior = 344
periodinferior = 345
commainferior = 346
Agravesmall = 347
Aacutesmall = 348
Acircumflexsmall = 349
Atildesmall = 350
Adieresissmall = 351
Aringsmall = 352
AEsmall = 353
Ccedillasmall = 354
Egravesmall = 355
Eacutesmall = 356
Ecircumflexsmall = 357
Edieresissmall = 358
Igravesmall = 359
Iacutesmall = 360
Icircumflexsmall = 361
Idieresissmall = 362
Ethsmall = 363
Ntildesmall = 364
Ogravesmall = 365
Oacutesmall = 366
Ocircumflexsmall = 367
Otildesmall = 368
Odieresissmall = 369
OEsmall = 370
Oslashsmall = 371
Ugravesmall = 372
Uacutesmall = 373
Ucircumflexsmall = 374
Udieresissmall = 375
Yacutesmall = 376
Thornsmall = 377
Ydieresissmall = 378
Black = 383
Bold = 384
Book = 385
Light = 386
Medium = 387
Regular = 388
Roman = 389
Semibold = 390
nStdStr = 391
@classmethod
def to_s(cls, sid):
if sid == cls._notdef:
return ".notdef"
elif sid == cls.space:
return "space"
elif sid == cls.exclam:
return "exclam"
elif sid == cls.quotedbl:
return "quotedbl"
elif sid == cls.numbersign:
return "numbersign"
elif sid == cls.dollar:
return "dollar"
elif sid == cls.percent:
return "percent"
elif sid == cls.ampersand:
return "ampersand"
elif sid == cls.quoteright:
return "quoteright"
elif sid == cls.parenleft:
return "parenleft"
elif sid == cls.parenright:
return "parenright"
elif sid == cls.asterisk:
return "asterisk"
elif sid == cls.plus:
return "plus"
elif sid == cls.comma:
return "comma"
elif sid == cls.hyphen:
return "hyphen"
elif sid == cls.period:
return "period"
elif sid == cls.slash:
return "slash"
elif sid == cls.zero:
return "zero"
elif sid == cls.one:
return "one"
elif sid == cls.two:
return "two"
elif sid == cls.three:
return "three"
elif sid == cls.four:
return "four"
elif sid == cls.five:
return "five"
elif sid == cls.six:
return "six"
elif sid == cls.seven:
return "seven"
| |
import os, sys
import datetime
import glob
import math
import json
import urllib
from Queue import Queue, PriorityQueue
import sqlite3 as sql
import struct
import threading
from PySide import QtGui, QtCore
from viewer import config
from viewer.ports import ask_for_port
from viewer.sample import *
#from viewer.sample import XOMBIEDecoder, XOMBIEStream, DataSource
from viewer.util import link, find_icon
from viewer.ViewWidget import TabViewContainer, NewTabViewSelector
from viewer.HistGraph import HistoricalGraphTabView
from viewer.LiveGraph import LiveGraphTabView
from viewer.BatteryStatus import BatteryScatterPlotTabView
from viewer.SignalWidget import SignalTreeWidget
def find_source(name):
    """Return the live stream's DataSource for *name*, or a fresh offline one
    when the XOMBIE thread is not running."""
    thread = app.xombie_thread
    if not thread.isRunning():
        return DataSource(name)
    return thread.stream.get_data(name)
class ConsoleLogger:
    """
    Implements minimal logging.logger functionality and displays to the console
    instead of writing to a file by putting messages into a queue which is
    periodically copied to the console.

    Messages are %-formatted with either positional or keyword arguments,
    colorized (HTML font tag) for the console by severity, and appended
    uncolored to a per-day log file.
    """
    def __init__(self, queue):
        self.queue = queue
        # Daily log file, append mode; intentionally kept open for the
        # lifetime of the logger.
        self.log = open("%s.log" % datetime.date.today().strftime("%Y-%m-%d"), "a")
    def _emit(self, msg, args, kwargs, color=None):
        # Shared implementation for all severities: format once, colorize for
        # the console queue, write plain text to the log file.
        formatted = (msg % kwargs) if kwargs else (msg % args)
        if color:
            self.queue.put('<font color="%s">%s</font>' % (color, formatted))
        else:
            self.queue.put(formatted)
        self.log.write(formatted + "\n")
    def info(self, msg, *args, **kwargs):
        self._emit(msg, args, kwargs)
    def warning(self, msg, *args, **kwargs):
        self._emit(msg, args, kwargs, color="orange")
    def error(self, msg, *args, **kwargs):
        self._emit(msg, args, kwargs, color="red")
    def critical(self, msg, *args, **kwargs):
        self._emit(msg, args, kwargs, color="red")
class XOMBIEThread(QtCore.QThread):
"""
Handles connecting to the Telemetry Board and pushing data to the queue
On startup, it launches the additional asynchronous XOMBIEStream thread
and collects data from it using callbacks.
Data from the XOMBIEStream is immediately logged and periodically committed
to disk. Separately, data queues are maintained on the stream for each
signal, from which the XOMBIE thread pushes data to DataSource objects.
instance variables:
connection - the DB-API connection object for the SQLite database, which
we use to commit data as it is received.
stream - the XOMBIEStream which we collect data from
sources - really just DataSource.sources
checking_heartbeat - indicates if we're waiting on a heartbeat response
got_heartbeat - indicates if we got a heartbeat response while we were waiting
heartbeat_timer - timer for one-shot five-second waiting for a response
timer - the main timer for XOMBIEStream event polling handling
commit_timer - the timer for periodic commits
method summary:
setup - takes care of all thread-specific setup
update_sources - notifies all active DataSources to copy over data from
the stream and to notify their listeners
process - implements the core XOMBIE handling loop for associating,
replying to hearbeats, and collecting data
check_heartbeat - callback to check if we got a heartbeat response to
our heartbeat request
mark_heartbeat - callback to record any heartbeat responses
insert_data - Adds a single data point to the database
commit_data - Commits data to the database
shutdown - Handles closing down the XOMBIEStream
"""
shutdown_event_type = QtCore.QEvent.Type(QtCore.QEvent.registerEventType())
def __init__(self, conn, stream, parent=None):
QtCore.QThread.__init__(self, parent)
self.stream = stream
self.connection = conn
self.timer = self.commit_timer = None
self.should_query = False
self.should_test = False
self.should_reassociate = False
self.should_discover = False
link(self.started, self.setup)
def setup(self):
self.got_heartbeat = False
self.checking_heartbeat = False
self.checking_assoc = False
self.heartbeat_timer = QtCore.QTimer()
self.heartbeat_timer.setSingleShot(True)
link(self.heartbeat_timer.timeout, self.check_timeout)
self.timer = QtCore.QTimer()
self.commit_timer = QtCore.QTimer()
link(self.commit_timer.timeout, self.commit_data)
link(self.timer.timeout, self.process)
self.stream.add_callback(0x85, self.mark_heartbeat)
self.stream.add_callback(0xC2, self.print_histogram)
self.stream.add_callback(0xE2, self.print_test)
self.stream.start()
self.commit_timer.start(5000)
self.timer.start(100)
def process(self):
"""
Handles one iteration of the XOMBIEStream processing.
Overall plan:
If we're not associated, try to associate
If we are, read in data from the stream
If we haven't heard anything for five seconds,
send a heartbeat request to make sure that
they're still there.
"""
if self.should_query:
self.should_query = False
self.stream.logger.info("Requesting CAN BUS Status Query")
self.stream.send_no_ack("\xc1")
elif self.should_test:
self.should_test = False
self.stream.logger.info("Requesting Signal Strength Test")
self.stream.send_no_ack("\xe1")
elif self.should_reassociate:
self.should_reassociate = False
self.stream.logger.info("Resetting to UNASSOCIATED mode")
self.stream.state = XOMBIEStream.UNASSOCIATED
elif self.should_discover:
self.should_discover = False
self.stream.logger.info("Requesting node discover")
def display_nodes(resp):
if "parameter" not in resp or not resp["parameter"]:
return
results = resp["parameter"]
self.stream.logger.info("Node discovered:")
fmt = "<HIIB"
data = results[:struct.calcsize(fmt)]
ident = results[struct.calcsize(fmt):-1]
unpacked = struct.unpack(fmt, data)
short_addr, long_addr_high, long_addr_low, rssi = unpacked
self.stream.logger.info(" Node ID: %s" % ident)
self.stream.logger.info(" 64-bit addr: %#08X%08X" %
(long_addr_high, long_addr_low))
self.stream.logger.info(" RSSI: %ddBm" % -rssi)
self.stream.at_command("ND", callback=display_nodes)
five_seconds = datetime.timedelta(seconds=15)
if self.stream.state is XOMBIEStream.UNASSOCIATED and not self.checking_assoc:
self.stream.send_handshake1()
print "Attempting to associate"
self.timer.setInterval(500)
self.checking_assoc = True
self.heartbeat_timer.start(5000)
if self.stream.state is XOMBIEStream.ASSOCIATED:
self.timer.setInterval(50)
gap = datetime.datetime.utcnow() - self.stream.last_received
if gap > five_seconds:
if not self.checking_heartbeat:
self.got_heartbeat = False
self.checking_heartbeat = True
self.stream.logger.warning("Haven't received data packet since %s",
self.stream.last_received.strftime("%H:%M:%S"))
self.stream.logger.warning("Sending heartbeat check")
self.stream.send_no_ack("\x84")
self.heartbeat_timer.start(5000)
else:
cursor = self.connection.cursor()
while not self.stream.msg_queue.empty():
id_, name, t, datum = self.stream.msg_queue.get_nowait()
self.insert_data(cursor, id_, name, t, datum)
cursor.close()
for source in self.stream.data_table.values():
source.pull()
def check_timeout(self):
if self.checking_heartbeat:
if not self.got_heartbeat:
self.stream.logger.error("Didn't hear a heartbeat response - disassociating.")
self.stream.state = XOMBIEStream.UNASSOCIATED
self.got_heartbeat = False
self.checking_heartbeat = False
if self.checking_assoc:
if self.stream.state != XOMBIEStream.ASSOCIATED:
self.stream.logger.error("Failed to associate within five seconds - resetting.")
self.stream.state = XOMBIEStream.UNASSOCIATED
self.checking_assoc = False
def mark_heartbeat(self):
if self.checking_heartbeat:
self.got_heartbeat = True
self.stream.logger.info("Got heartbeat response")
def print_histogram(self, counts):
interval = 5
self.stream.logger.info("----Histogram-------------")
for i, count in enumerate(counts):
if count:
self.stream.logger.info("%3d-%3dms: %d",
i*interval,
interval*(i+1),
count)
def print_test(self, msg):
self.stream.logger.info("Got test message: %s" % msg)
def insert_data(self, cursor, id_, name, t, data):
cmd = "INSERT INTO data(id, name, time, data) VALUES (?,?,?,?)"
cursor.execute(cmd, (id_, name, t, json.dumps(data, ensure_ascii=False)))
def commit_data(self):
self.connection.commit()
def event(self, evt):
if evt.type() == self.shutdown_event_type:
evt.accept()
self.shutdown()
return True
else:
return QtCore.QThread.event(self, evt)
def shutdown(self):
self.commit_timer.stop()
self.timer.stop()
if self.stream is not None:
self.stream.close()
self.quit()
class TransparentThread(QtCore.QThread):
    """Simpler sibling of XOMBIEThread for a TransparentStream: no association
    or heartbeat handling, it just polls the stream and persists samples.

    NOTE(review): process() dereferences self.stream unconditionally, but the
    application creates this thread with stream=None in debug mode -- confirm
    the timer is never started in that configuration.
    """
    shutdown_event_type = QtCore.QEvent.Type(QtCore.QEvent.registerEventType())
    def __init__(self, conn, stream, parent=None):
        QtCore.QThread.__init__(self, parent)
        self.stream = stream
        self.connection = conn
        # Timers are created lazily in setup() so they live on this thread.
        self.timer = self.commit_timer = None
        link(self.started, self.setup)
    def setup(self):
        # Thread-affine setup, triggered by the started signal: the timers
        # must be created on this thread for their callbacks to run here.
        self.timer = QtCore.QTimer()
        self.commit_timer = QtCore.QTimer()
        link(self.commit_timer.timeout, self.commit_data)
        link(self.timer.timeout, self.process)
        self.commit_timer.start(5000)
        self.timer.start(50)
    def process(self):
        # One polling iteration: pump the stream, drain queued samples into
        # the database, then let each DataSource pull its fresh data.
        self.stream.process()
        cursor = self.connection.cursor()
        while not self.stream.msg_queue.empty():
            id_, name, t, datum = self.stream.msg_queue.get_nowait()
            self.insert_data(cursor, id_, name, t, datum)
        cursor.close()
        for source in self.stream.data_table.values():
            source.pull()
    def insert_data(self, cursor, id_, name, t, data):
        # Add a single sample; the datum is stored JSON-encoded.
        cmd = "INSERT INTO data(id, name, time, data) VALUES (?,?,?,?)"
        cursor.execute(cmd, (id_, name, t, json.dumps(data, ensure_ascii=False)))
    def commit_data(self):
        # Periodic commit of buffered inserts (every 5s, see setup).
        self.connection.commit()
    def event(self, evt):
        # Handle the custom shutdown event posted from another thread.
        if evt.type() == self.shutdown_event_type:
            evt.accept()
            self.shutdown()
            return True
        else:
            return QtCore.QThread.event(self, evt)
    def shutdown(self):
        # Stop timers, close the stream and end the thread's event loop.
        self.commit_timer.stop()
        self.timer.stop()
        if self.stream is not None:
            self.stream.close()
        self.quit()
def tableExists(conn, name):
    """Return True if a schema object named *name* exists in the SQLite
    database behind *conn* (sqlite_master lists tables, indexes, views...).

    The cursor is now closed even if execute() raises (was leaked before),
    and the None comparison uses identity per Python convention.
    """
    cur = conn.cursor()
    try:
        cur.execute('SELECT name FROM sqlite_master where name = ?;', (name,))
        return cur.fetchone() is not None
    finally:
        cur.close()
class TelemetryApp(QtGui.QApplication):
    def setup(self):
        # Application bootstrap: pick a serial port, open the sample database,
        # build the main window and start the stream + web-upload threads.
        port = ask_for_port(os.path.join("config", "ports.cfg"))
        if port is None:
            # No serial port selected: run the UI without a stream thread.
            self.start_thread = False
            print "Running in debug mode - no serial port connected"
        else:
            self.start_thread = True
        self.read_config()
        print "Logging to %s" % self.general_options["database"]
        self.connection = sql.connect(self.general_options["database"],
                                      detect_types=(sql.PARSE_DECLTYPES
                                                    | sql.PARSE_COLNAMES))
        self.config_database(self.connection, False)
        desc_sets = self.load_can_descriptors()
        decoder = TransparentMessageDecoder([desc_set for (source, desc_set) in desc_sets])
        #decoder = XOMBIEDecoder([desc_set for (source, desc_set) in desc_sets])
        self.window = TelemetryViewerWindow(self, "Telemetry Viewer", desc_sets)
        if self.start_thread:
            stream = TransparentStream(decoder, self.window.logger, port)
##            stream = XOMBIEStream(port, decoder, self.window.logger,
##                                  self.general_options["board_address"])
        else:
            stream = None
        # NOTE(review): stream may be None here; TransparentThread.process
        # assumes a live stream -- verify the timers never start in debug mode.
        self.xombie_thread = TransparentThread(self.connection, stream)
        # For sending data to an external web server
        self.web_connect_thread = threading.Thread(target=self.web_connect,
                                                   args=[stream])
        self.web_connect_thread.daemon = True
        self.web_connect_thread.start()
        link(self.lastWindowClosed, self.closeEvent)
def web_connect(self, stream):
"""Retrieves parameters from general.cfg, periodically uploads data
to host"""
if not stream:
print 'No Adruino appears to be connected, web connect shutting down'
return
# These are the packets we're looking for
packets = {
'speed': stream.get_data('0x403:Vehicle Velocity'),
'array_current': stream.get_data('0x524:Current 2'),
'motor_current': stream.get_data('0x524:Current 1'),
'tritium_volt': stream.get_data('0x402:Bus Voltage'),
'tritium_current': stream.get_data('0x402:Bus Current'),
'battery_volt': stream.get_data('0x523:Voltage 1'),
}
host = self.general_options['host']
car_id = self.general_options['car_id']
car_token = self.general_options['car_token']
# Check if we can reach the host
print 'Web connect started with id %s, token %s.' % (car_id, car_token)
print 'Attempting to communicate with host...'
try:
result = urllib.urlopen('%s/api/cars' % host).read()
cars = json.loads(result)['cars']
name, token = cars[str(car_id)]
print 'Success, car name is %s' % name
except Exception, e: # TODO: More specific exception?
print 'Error reaching url %s/api/cars\r\n' % host
print e
while True:
try:
packet_data = {'id': car_id, 'token': car_token}
for key, value | |
<reponame>vanheeringen-lab/scepia
# Copyright (c) 2019 <NAME> <<EMAIL>>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file LICENSE included with this
# distribution.
from collections import Counter
import os
import sys
from tempfile import NamedTemporaryFile, TemporaryDirectory
# Typing
from typing import List, Optional, Tuple
from anndata import AnnData
from appdirs import user_cache_dir
from loguru import logger
import numpy as np
import pandas as pd
import scanpy as sc
from sklearn.linear_model import ( # noqa: F401
BayesianRidge,
LogisticRegression,
LassoCV,
)
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import KFold
from sklearn.preprocessing import scale
from sklearn.utils import shuffle
from scipy.sparse import issparse
from scipy.stats import percentileofscore, combine_pvalues
from statsmodels.stats.multitest import multipletests
from tqdm.auto import tqdm
import geosketch
from gimmemotifs.moap import moap
from gimmemotifs.maelstrom import run_maelstrom
from gimmemotifs.motif import read_motifs
from gimmemotifs.utils import pfmfile_location
from scepia import __version__
from scepia.plot import plot
from scepia.util import fast_corr
from scepia.data import ScepiaDataset
CACHE_DIR = os.path.join(user_cache_dir("scepia"))
def motif_mapping(
    pfm: Optional[str] = None,
    genes: Optional[List[str]] = None,
    indirect: Optional[bool] = True,
) -> pd.DataFrame:
    """Read motif annotation and return as DataFrame.
    Parameters
    ----------
    pfm : `str`, optional
        Name of pfm file. Should have an associated file with mapping from
        motif to factors, with the .motif2factors.txt extension.
    genes : `list`, optional
        List of gene names to include. If None all genes will be included.
    indirect : `boolean`, optional
        Include indirect factors in the annotation. Default True. If set to
        False only factors for which there is direct evidence will be
        used.
    Returns
    -------
    `pd.DataFrame`
        DataFrame with motif names as index and an associated column with TFs
        that bind to the motifs.
    """
    m = read_motifs(pfm)
    m2f = {}
    for motif in m:
        # Copy the list: the += below would otherwise extend
        # motif.factors["direct"] in place, polluting the motif object and
        # duplicating entries if this function is called more than once.
        factors = list(motif.factors["direct"])
        # Also include factors for which there is no direct evidence
        if indirect:
            factors += motif.factors.get("indirect\nor predicted", [])
        # Create a string of comma-separated factors per motif
        factors = list(set([factor.upper() for factor in factors]))
        for factor in factors:
            if genes is None or factor in genes:
                if motif.id not in m2f:
                    m2f[motif.id] = factor
                else:
                    m2f[motif.id] = m2f[motif.id] + "," + factor
    m2f = pd.DataFrame({"factors": m2f})
    return m2f
def annotate_with_k27(
    adata: AnnData,
    gene_df: pd.DataFrame,
    cluster: Optional[str] = "louvain",
    n_neighbors: Optional[int] = 20,
    center_expression: Optional[bool] = True,
    model: Optional[str] = "BayesianRidge",
    use_neighbors: Optional[bool] = True,
    use_raw: Optional[bool] = False,
    subsample: Optional[bool] = True,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Annotate single cell data with the closest reference cell type.

    For every (sub)sampled cell, a regression model is fit that explains the
    cell's expression (optionally averaged over its graph neighbors) from the
    reference cell type profiles in `gene_df`; the cell type with the highest
    coefficient wins.

    Parameters
    ----------
    adata : AnnData with clustering and a neighbor graph computed.
    gene_df : reference expression, genes (index) x cell types (columns).
    cluster : obs column that holds the clustering labels.
    n_neighbors : currently unused; kept for backward compatibility.
    center_expression : subtract the per-gene mean from the expression.
    model : name of the regression class to instantiate; must be importable
        in this module (e.g. "BayesianRidge", "LogisticRegression").
    use_neighbors : average expression over each cell's graph neighbors.
    use_raw : use log1p of `adata.raw.X` instead of `adata.X`.
    subsample : fit on 5 KFold subsamples and take a majority vote instead
        of a single fit per cell.

    Returns
    -------
    (annotation, coefficients)
        annotation: DataFrame with a "cell_annotation" column per cell.
        coefficients: DataFrame of model coefficients (cell types x cells).
    """
    gene_df.index = gene_df.index.str.upper()
    # Only use genes that overlap
    common_genes = adata.var_names.str.upper().intersection(gene_df.index).unique()
    # Create expression DataFrame (genes x cells) based on common genes
    if use_raw:
        expression = pd.DataFrame(
            np.log1p(
                adata.raw.X[
                    :, adata.var_names.str.upper().isin(gene_df.index)
                ].todense()
            ),
            index=adata.obs_names,
            columns=common_genes,
        ).T
    else:
        expression = adata.X[:, adata.var_names.str.upper().isin(gene_df.index)]
        expression = (
            np.squeeze(np.asarray(expression.todense()))
            if issparse(expression)
            else expression
        )
        expression = pd.DataFrame(
            expression, index=adata.obs_names, columns=common_genes,
        ).T
    if center_expression:
        expression = expression.sub(expression.mean(1), 0)
    # Sample at most N cells per cluster.
    N = 100000
    unique_cell_types = adata.obs[cluster].unique()
    counts = adata.obs.groupby(cluster).count().iloc[:, 0].to_dict()
    ids = np.arange(adata.shape[0])
    idxs = []
    for cell_type in unique_cell_types:
        if counts[cell_type] <= N:
            idx = ids[adata.obs[cluster] == cell_type]
        else:
            idx = np.random.choice(
                ids[adata.obs[cluster] == cell_type], N, replace=False
            )
        idxs.extend(idx)
    X = gene_df.loc[common_genes]
    # BUG FIX: the original rebound `model` to the estimator instance, so the
    # later `model == "LogisticRegression"` comparison could never be true.
    # Keep the model *name* and the estimator *instance* separate.
    estimator = getattr(sys.modules[__name__], model)()
    kf = KFold(n_splits=5)
    result = []
    df_coef = pd.DataFrame(index=gene_df.columns)
    with tqdm(total=len(idxs), file=sys.stdout) as pbar:
        for i in idxs:
            if use_neighbors:
                # Boolean mask of cells connected to cell i in the graph.
                my_neighbors = (
                    pd.DataFrame((adata.obsp["connectivities"][i] != 0).todense())
                    .iloc[0]
                    .values
                )
                y = expression.loc[:, my_neighbors].mean(1)
            else:
                y = expression.iloc[:, i]
            if subsample:
                # Fit on 5 folds and let the folds vote on the cell type.
                cts = []
                for _, idx in kf.split(X):
                    estimator.fit(X.iloc[idx], y[idx])
                    coef = pd.DataFrame(estimator.coef_, index=gene_df.columns)
                    ct = coef.sort_values(0).tail(1).index[0]
                    cts.append(ct)
                top_ct = Counter(cts).most_common()[0][0]
                df_coef[i] = pd.DataFrame.from_dict(Counter(cts), orient="index")
                df_coef[i] = df_coef[i].fillna(0)
            else:
                estimator.fit(X, y)
                # LogisticRegression stores per-class coefficients in a 2-D
                # array; take the first row.
                if model == "LogisticRegression":
                    coef = pd.DataFrame(estimator.coef_[0], index=gene_df.columns)
                else:
                    coef = pd.DataFrame(estimator.coef_, index=gene_df.columns)
                df_coef[i] = coef[0]
                top_ct = coef.sort_values(0).tail(1).index[0]
            result.append([top_ct])
            pbar.update(1)
    df_coef = df_coef[sorted(df_coef.columns)]
    return (
        pd.DataFrame(result, columns=["cell_annotation"], index=adata.obs_names[idxs]),
        df_coef,
    )
def relevant_cell_types(
    adata: AnnData,
    gene_df: pd.DataFrame,
    cluster: Optional[str] = "louvain",
    n_top_genes: Optional[int] = 1000,
    max_cell_types: Optional[int] = 50,
    cv: Optional[int] = 5,
) -> List[str]:
    """Select relevant cell types for annotation and motif inference.

    Based on Lasso regression a subset of features (cell type
    profile) will be selected. Expression is averaged over clusters.
    Requires louvain or leiden clustering to be run on the `adata` object.

    Parameters
    ----------
    adata : :class:`~anndata.AnnData`
        Annotated data matrix.
    gene_df : :class:`pandas.DataFrame`
        Gene-based reference data.
    cluster : `str`, optional (default: "louvain")
        Name of the obs column holding the cluster labels.
    n_top_genes : `int`, optional (default: 1000)
        Number of variable genes is used. If `n_top_genes` is greater than the
        number of hypervariable genes in `adata` then all variable genes are
        used.
    max_cell_types : `int`, optional (default: 50)
        Maximum number of cell types to select.
    cv : `int`, optional (default: 5)
        Folds for cross-validation

    Returns
    -------
    `list`
        Cell types ordered by the mean absolute coefficient over clusters in
        descending order.
    """
    gene_df.index = gene_df.index.str.upper()
    # Drop reference genes with no signal in any cell type.
    gene_df = gene_df[gene_df.max(1) > 0]
    logger.info("Selecting reference cell types")
    common_genes = list(gene_df.index[gene_df.index.isin(adata.var_names.str.upper())])
    # Dense (genes x cells) expression matrix restricted to the common genes.
    expression = (
        np.squeeze(np.asarray(adata.X.todense())) if issparse(adata.X) else adata.X
    )
    expression = pd.DataFrame(
        expression, index=adata.obs_names, columns=adata.var_names.str.upper()
    ).T
    expression = expression.loc[common_genes]
    # Average expression per cluster.
    expression.columns = adata.obs[cluster]
    # NOTE(review): axis=1 groupby is deprecated in newer pandas releases —
    # confirm the pinned pandas version still supports it.
    expression = expression.groupby(expression.columns, axis=1).mean()
    # Keep the n_top_genes most hypervariable among the common genes.
    var_genes = (
        adata.var.loc[
            adata.var_names.str.upper().isin(common_genes), "dispersions_norm"
        ]
        .sort_values()
        .tail(n_top_genes)
        .index.str.upper()
    )
    logger.info(f"Using {len(var_genes)} hypervariable common genes")
    expression = expression.loc[var_genes]
    X = gene_df.loc[var_genes]
    # Lasso picks a sparse set of reference cell types per cluster.
    g = LassoCV(cv=cv, selection="random")
    cell_types = pd.DataFrame(index=X.columns)
    for col in expression.columns:
        g.fit(X, expression[col])
        coefs = pd.DataFrame(g.coef_, index=X.columns)
        cell_types[col] = coefs
    # Rank cell types by total absolute coefficient across clusters; keep at
    # most max_cell_types with non-zero weight.
    cell_types = cell_types.abs().sum(1).sort_values().tail(max_cell_types)
    cell_types = cell_types[cell_types > 0].index
    top = cell_types[-5:]
    logger.info("{} out of {} selected".format(len(cell_types), gene_df.shape[1]))
    logger.info(f"Top {len(top)}:")
    for cell_type in top:
        logger.info(f" * {cell_type}")
    # NOTE: returns a pandas Index (iterates like the documented list of str).
    return cell_types
def validate_adata(adata: AnnData) -> None:
    """Check if adata contains the necessary prerequisites to run the
    motif inference.

    Raises
    ------
    ValueError
        If the raw expression matrix is missing, or if neither a neighbor
        graph plus louvain/leiden clustering is present.
    """
    if adata.raw is None:
        raise ValueError("Please save the raw expression data in the .raw property.")
    has_clustering = "louvain" in adata.obs or "leiden" in adata.obs
    if "connectivities" not in adata.obsp or not has_clustering:
        raise ValueError("Please run louvain or leiden clustering first.")
def change_region_size(series: pd.Series, size: Optional[int] = 200) -> pd.Series:
    """Return "chrom:start-end" regions recentered to a fixed width.

    Each region keeps its midpoint and is resized to `size` bp.
    Accepts a Series, an Index (via to_series) or any sequence of strings.
    """
    if not isinstance(series, pd.Series):
        series = series.to_series() if hasattr(series, "to_series") else pd.Series(series)
    parts = series.str.split("[:-]", expand=True)
    # Midpoint of the original region, then a window of `size` around it.
    centers = (parts[1].astype(int) + parts[2].astype(int)) // 2
    new_starts = centers - size // 2
    parts["end"] = (new_starts + size).astype(str)
    parts["start"] = new_starts.astype("str")
    return parts[0] + ":" + parts["start"] + "-" + parts["end"]
def annotate_cells(
    adata: AnnData,
    dataset: str,
    cluster: Optional[str] = "louvain",
    n_top_genes: Optional[int] = 1000,
    max_cell_types: Optional[int] = 50,
    min_annotated: Optional[int] = 50,
    select: Optional[bool] = True,
) -> None:
    """
    Assign cells with cell type based on H3K27ac reference profiles.

    Stores per-cell coefficients in ``adata.obsm["X_cell_types"]`` and a
    per-cluster label in the ``cluster_annotation`` column of ``adata.obs``.
    """
    # Determine which reference cell types are used for motif activity and
    # cell type annotation; all others are ignored downstream.
    reference = ScepiaDataset(dataset)
    gene_df = reference.load_reference_data(reftype="gene")
    if not select:
        logger.info("Selecting all reference cell types.")
        cell_types = gene_df.columns
    else:
        cell_types = relevant_cell_types(
            adata,
            gene_df,
            cluster=cluster,
            n_top_genes=n_top_genes,
            max_cell_types=max_cell_types,
        )
    if "scepia" not in adata.uns:
        adata.uns["scepia"] = {"version": __version__}
    adata.uns["scepia"]["cell_types"] = list(cell_types)
    logger.info("Annotating cells.")
    _, coef_table = annotate_with_k27(
        adata,
        gene_df[cell_types],
        cluster=cluster,
        use_neighbors=True,
        model="BayesianRidge",
        subsample=False,
        use_raw=False,
    )
    adata.obsm["X_cell_types"] = coef_table.T[adata.uns["scepia"]["cell_types"]].values
    # Label each cluster by the cell type with the highest mean coefficient.
    coef_df = pd.DataFrame(
        adata.obsm["X_cell_types"], index=adata.obs_names, columns=cell_types
    )
    coef_df["cluster"] = adata.obs[cluster]
    per_cluster_label = (
        coef_df.groupby("cluster").mean().idxmax(axis=1).to_frame("cluster_annotation")
    )
    if "cluster_annotation" in adata.obs:
        adata.obs = adata.obs.drop(columns=["cluster_annotation"])
    adata.obs = adata.obs.join(per_cluster_label, on=cluster)
    # Second round of annotation, including "other".
    assign_cell_types(adata, min_annotated=min_annotated)
def infer_motifs(
adata: AnnData,
dataset: str,
cluster: Optional[str] = "louvain",
n_top_genes: Optional[int] = 1000,
max_cell_types: Optional[int] = 50,
pfm: Optional[str] = None,
min_annotated: Optional[int] = 50,
num_enhancers: Optional[int] = 10000,
maelstrom: Optional[bool] = False,
indirect: Optional[bool] = True,
n_sketch: Optional[int] = 2500,
n_permutations: Optional[int] = 100000,
) -> None:
"""Infer motif ativity for single cell RNA-seq data.
The adata object is modified with the following fields.
**X_cell_types** : `adata.obsm` field
Cell type coefficients.
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
dataset : `str`
Name of reference data set or directory with reference data.
cluster : `str`, optional (default: "louvain")
Name of the clustering, can be either louvain or leiden.
n_top_genes : `int`, optional (default: 1000)
Number of variable genes that is used. If `n_top_genes` is greater than the
number of hypervariable genes in `adata` then all variable genes are
used.
max_cell_types : `int`, optional (default: 50)
Maximum number of cell types to select.
pfm : `str`, optional (default: None)
Name of | |
<gh_stars>0
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
This is the verbose version of my response to the Python code prompt. I may have gotten carried away making it interactive...
"Use object-oriented Python to model a public library (w/ three classes: Library, Shelf, & Book). *
- The library should be aware of a number of shelves.
- Each shelf should know what books it contains.
- Make the book object have "enshelf" and "unshelf" methods that control what shelf the book is sitting on.
- The library should have a method to report all books it contains.
Note: this should *not* be a Django (or any other) app - just a single file with three classes (plus commands at the bottom showing it works) is all that is needed."
'''
import string
import random
class Library:
    """A tiny library made of 26 lettered Shelf objects plus a pile of
    unshelved Books.

    Shelves live in ``the_card_catalogue`` (keys 'a'-'z' mapping to Shelf
    objects); books not on any shelf live in
    ``the_dictionary_of_unshelved_books`` (keyed by title).
    """

    def __init__(self):
        """Create a Library and immediately fill it via regenerate_library()."""
        # Maps shelf letter ('a'-'z') -> Shelf object.
        self.the_card_catalogue = {}
        # Maps book title -> Book object, for books not on any shelf.
        self.the_dictionary_of_unshelved_books = {}
        self.regenerate_library()

    def report_library_state(self):
        """Print the shelved-book/shelf counts and the unshelved count."""
        print( "\nThe library currently contains %r books in its %r shelves." % ( self.count_books_in_this_library(), len(self.the_card_catalogue) ) )
        print( "\n%r books are currently checked out of the library." % (len(self.the_dictionary_of_unshelved_books)) )

    def list_all_shelf_names(self):
        """Print every shelf letter together with its book count."""
        entries = [
            "%r (%r)" % (letter, len(self.the_card_catalogue[letter].shelf_contents))
            for letter in sorted(self.the_card_catalogue)
        ]
        print("\nThis library contains the following shelves (with this many books):")
        print(", ".join(entries))

    def list_all_shelf_contents(self):
        """Print the contents of every shelf, in shelf-letter order."""
        for letter in sorted(self.the_card_catalogue):
            self.the_card_catalogue[letter].report_shelf_contents()

    def list_the_contents_of_this_particular_shelf(self, which_shelf_letter):
        """Print one shelf's contents after validating the shelf name."""
        if determine_if_this_is_a_valid_shelf_name(which_shelf_letter):
            self.the_card_catalogue[which_shelf_letter].report_shelf_contents()

    def create_new_book(self, title=None):
        """Create a Book (random 3-6 letter title when none is given) and
        shelve it automatically.

        Returns the new Book. Raises ValueError for non-alphabetic titles.
        """
        if title is None:
            # 97 through 122 are the ASCII ordinals for lowercase a-z.
            new_book_title = "".join(
                chr(random.randint(97, 122))
                for _ in range(random.randint(3, 6))
            )
        elif title.isalpha():
            new_book_title = title
        else:
            raise ValueError("%r is not a valid character string." % (title))
        # Books enshelve themselves on construction; they need the Library
        # reference to locate their Shelf.
        return Book(self, new_book_title)

    def regenerate_library(self, new_books_to_create=None):
        """Discard all shelves and books, then rebuild with fresh books.

        new_books_to_create: number of books to create; when None a random
        count between 30 and 60 is used. (BUG FIX: this parameter used to
        be ignored and unconditionally overwritten with a random count.)
        """
        # Start from an empty building.
        self.the_card_catalogue = {}
        self.the_dictionary_of_unshelved_books = {}
        # Recarve the 26 lettered shelves.
        for ordinal in range(97, 123):
            letter = chr(ordinal)
            self.the_card_catalogue[letter] = Shelf(letter)
        if new_books_to_create is None:
            new_books_to_create = random.randint(30, 60)
        # Rewrite the books.
        for _ in range(new_books_to_create):
            self.create_new_book()

    def count_books_in_this_library(self):
        """Return how many books are currently sitting on shelves."""
        return sum(
            len(shelf.shelf_contents)
            for shelf in self.the_card_catalogue.values()
        )
class Shelf:
    """A single lettered shelf holding Book objects keyed by title."""

    def __init__(self, supplied_shelf_letter):
        """Create an empty Shelf identified by a single letter; its contents
        can be inspected with report_shelf_contents()."""
        self.shelf_letter = supplied_shelf_letter
        self.shelf_contents = {}

    def report_shelf_contents(self):
        """Print this shelf's letter and the titles it holds, sorted."""
        if not self.shelf_contents:
            print("\n Shelf letter %r is currently empty." % (self.shelf_letter))
            return
        print("\n Shelf letter %r contains the following %r books:" % (self.shelf_letter, len(self.shelf_contents)))
        for title in sorted(self.shelf_contents):
            print(" " + self.shelf_contents[title].title)
class Book:
    """A book that always lives either on exactly one Shelf or in its
    Library's pile of unshelved books, and can move itself between them."""

    def __init__(self, supplied_library_object, supplied_book_title):
        ''' Create a Book with a title that is_on_this_shelf and can be enshelf()ed and unshelf()ed. '''
        # Back-reference to the owning Library, needed to reach its shelves
        # and its pile of unshelved books.
        self.reference_variable_for_the_library_this_book_is_inside = supplied_library_object
        # Capitalize the first letter; the shelf letter is derived from it.
        self.title = supplied_book_title[0].upper() + supplied_book_title[1:]
        # Either a Shelf, the library's unshelved-books dict, or None (only
        # transiently, before the automatic enshelf() below).
        self.is_on_this_shelf = None
        # Books are enshelved automatically upon initialization.
        self.enshelf()

    def unshelf(self, no_shuffle_trace=False):
        ''' Ensure this Book is not in a Shelf by removing it from any Shelf it might be in and inserting it into the_dictionary_of_unshelved_books. '''
        # No-op if this book is already in the pile of unshelved books.
        if self.is_on_this_shelf != self.reference_variable_for_the_library_this_book_is_inside.the_dictionary_of_unshelved_books:
            # A freshly created Book has None as its Shelf reference; .pop()
            # would fail on that, so only remove when actually on a shelf.
            if self.is_on_this_shelf != None:
                # With tracing enabled, print the shelf contents before and
                # after the pop(); the pop() is duplicated in both branches
                # because its position relative to the prints matters.
                if no_shuffle_trace == False:
                    old_shelf_letter = self.is_on_this_shelf.shelf_letter
                    old_shelf_contents = ", ".join(sorted(list(self.reference_variable_for_the_library_this_book_is_inside.the_card_catalogue[old_shelf_letter].shelf_contents)))
                    print(" || Shelf %r contents: %r" % (old_shelf_letter, old_shelf_contents))
                    print(" <--- Book %r has been removed from shelf %r." % (self.title, self.is_on_this_shelf.shelf_letter))
                    self.is_on_this_shelf.shelf_contents.pop(self.title)
                    old_shelf_contents = ", ".join(sorted(list(self.reference_variable_for_the_library_this_book_is_inside.the_card_catalogue[old_shelf_letter].shelf_contents)))
                    print(" || Shelf %r contents: %r" % (old_shelf_letter, old_shelf_contents))
                else:
                    self.is_on_this_shelf.shelf_contents.pop(self.title)
            # Now put the book in the unshelved pile...
            self.reference_variable_for_the_library_this_book_is_inside.the_dictionary_of_unshelved_books[self.title] = self
            # ...and point is_on_this_shelf at the pile so the book knows.
            self.is_on_this_shelf = self.reference_variable_for_the_library_this_book_is_inside.the_dictionary_of_unshelved_books

    def enshelf(self, which_shelf_letter=None, no_shuffle_trace=True):
        """Place this Book on a Shelf (default: the shelf matching the first
        letter of its title), removing it from wherever it currently is."""
        if which_shelf_letter == None:
            which_shelf_letter = self.title[0].lower()
        # First, ensure the book is 100% unshelved and placed in the unshelved Book pile:
        self.unshelf(no_shuffle_trace=True)  # enshelf() shouldn't use no_shuffle_trace=False.
        # The Book must now be removed from the pile of unshelved Books:
        self.reference_variable_for_the_library_this_book_is_inside.the_dictionary_of_unshelved_books.pop(self.title)
        # Obtain a reference to the specific Shelf this Book must go in:
        shelf_to_put_this_book_on = self.reference_variable_for_the_library_this_book_is_inside.the_card_catalogue[which_shelf_letter]
        # Putting a Book on a Shelf means keying it by title into the Shelf's
        # shelf_contents dictionary.
        shelf_to_put_this_book_on.shelf_contents[self.title] = self
        self.is_on_this_shelf = shelf_to_put_this_book_on
        # Only trace explicit moves (not initial shelving), to avoid spam
        # during Book initialization:
        if no_shuffle_trace == False:
            print(" ---> Book %r has been moved to shelf letter %r." % (self.title, self.is_on_this_shelf.shelf_letter))
            print(" || Shelf %r contents: %r" % (self.is_on_this_shelf.shelf_letter, ", ".join(sorted(self.is_on_this_shelf.shelf_contents))))
def determine_if_this_is_a_valid_shelf_name(which_shelf_letter):
    """Return True when the argument names a shelf (a single letter a-z,
    case-insensitive); otherwise print why it is invalid and return None."""
    candidate = which_shelf_letter.lower()
    if not candidate.isalpha():
        print("%r is not an alphanumeric string." % (candidate))
    elif len(candidate) != 1:
        print("%r is either too long or too short to be a single letter." % (candidate))
    elif not ("a" <= candidate <= "z"):
        # Single alphabetic character, but outside ASCII a-z (e.g. accented).
        print("%r is not a valid shelf letter." % (candidate))
    else:
        return True
def unshelf(self, no_shuffle_trace=True):
''' Ensure this Book is not in a Shelf by removing it from any Shelf it might be in and inserting it into the_dictionary_of_unshelved_books. '''
## If this book is NOT in the pile of unshelved books...
if self.is_on_this_shelf != self.reference_variable_for_the_library_this_book_is_inside.the_dictionary_of_unshelved_books:
## If the book was recently created, it might have None as its Shelf reference. foo.pop() wouldn't like that, so handle it with a simple conditional:
if self.is_on_this_shelf != None:
## Cancelling the confusing half of the spamminess at the start:
if no_shuffle_trace == False:
old_shelf_letter = self.is_on_this_shelf.shelf_letter
old_shelf_contents = ", ".join(sorted(list(self.reference_variable_for_the_library_this_book_is_inside.the_card_catalogue[old_shelf_letter].shelf_contents)))
print(" || Shelf %r contents: %r" % (old_shelf_letter, old_shelf_contents))
print(" <--- Book %r has been removed from shelf %r." % (self.title, self.is_on_this_shelf.shelf_letter))
self.is_on_this_shelf.shelf_contents.pop(self.title)
old_shelf_contents = ", ".join(sorted(list(self.reference_variable_for_the_library_this_book_is_inside.the_card_catalogue[old_shelf_letter].shelf_contents)))
print(" || Shelf %r contents: %r" % | |
<gh_stars>0
import datetime
import difflib
import hashlib
import os
import re
import socket
import time
from xmlrpc.client import ServerProxy
import psycopg2
import redis
import ujson as json
from kombu import Connection
from kombu import Exchange
from kombu import Queue
from kombu import serialization
from kombu import uuid
from kombu.utils.compat import nested
from psycopg2 import sql
# ujson-based serializer for kombu messages (faster than stdlib json).
serialization.register(
    "ujson",
    json.dumps,
    json.loads,
    content_type="application/x-ujson",
    content_encoding="utf-8",
)
# additional serializer for pg-amqp messages (plain "text" content type)
serialization.register(
    "txtjson", json.dumps, json.loads, content_type="text", content_encoding="utf-8"
)
class Tester:
    """Integration test driver for the ARTEMIS detection pipeline.

    Connects to the backing services (Postgres, Redis, supervisord,
    RabbitMQ), replays crafted BGP updates from ``testfiles/`` and validates
    the messages the pipeline emits against the expected responses stored in
    each test file.
    """

    def __init__(self):
        # Anchor timestamp: test fixtures store relative times which are
        # shifted by this value to look like "real-time" events.
        self.time_now = int(time.time())
        self.initRedis()
        self.initSupervisor()

    def getDbConnection(self):
        """
        Return a connection for the postgres database.

        Retries with a 1s backoff until the database accepts connections.
        """
        db_conn = None
        while not db_conn:
            try:
                _db_name = os.getenv("DB_NAME", "artemis_db")
                _user = os.getenv("DB_USER", "artemis_user")
                _host = os.getenv("DB_HOST", "postgres")
                _port = os.getenv("DB_PORT", 5432)
                _password = os.getenv("DB_PASS", "<PASSWORD>")
                db_conn = psycopg2.connect(
                    application_name="detection-tester",
                    dbname=_db_name,
                    user=_user,
                    host=_host,
                    port=_port,
                    password=_password,
                )
            # BUG FIX: was `except BaseException`, which also swallowed
            # KeyboardInterrupt/SystemExit and made this retry loop
            # uninterruptible. Only database errors should trigger a retry.
            except psycopg2.Error:
                time.sleep(1)
        return db_conn

    def initRedis(self):
        """Connect to Redis and block until it responds to PING."""
        redis_ = redis.Redis(
            host=os.getenv("REDIS_HOST", "backend"),
            # BUG FIX: the fallback port was mistyped as 6739; the standard
            # Redis port is 6379.
            port=os.getenv("REDIS_PORT", 6379),
        )
        self.redis = redis_
        while True:
            try:
                if not self.redis.ping():
                    # BUG FIX: was `raise BaseException(...)`, which the
                    # `except Exception` below could never catch, so a falsy
                    # ping crashed instead of retrying.
                    raise Exception("could not ping redis")
                break
            except Exception:
                print("retrying redis ping in 5 seconds...")
                time.sleep(5)

    def initSupervisor(self):
        """Create an XML-RPC proxy to the backend's supervisord."""
        BACKEND_SUPERVISOR_HOST = os.getenv("BACKEND_SUPERVISOR_HOST", "backend")
        BACKEND_SUPERVISOR_PORT = os.getenv("BACKEND_SUPERVISOR_PORT", 9001)
        self.supervisor = ServerProxy(
            "http://{}:{}/RPC2".format(BACKEND_SUPERVISOR_HOST, BACKEND_SUPERVISOR_PORT)
        )

    def clear(self):
        """Wipe BGP updates and hijacks from Postgres, flush Redis, and
        reset the per-test counters."""
        db_con = self.getDbConnection()
        db_cur = db_con.cursor()
        query = "delete from bgp_updates; delete from hijacks;"
        db_cur.execute(query)
        db_con.commit()
        db_cur.close()
        db_con.close()
        self.redis.flushall()
        self.curr_idx = 0
        self.send_cnt = 0
        self.expected_messages = 0

    @staticmethod
    def redis_key(prefix, hijack_as, _type):
        """Return the Redis key of a hijack (hash of prefix, AS and type)."""
        assert (
            isinstance(prefix, str)
            and isinstance(hijack_as, int)
            and isinstance(_type, str)
        )
        return Tester.get_hash([prefix, hijack_as, _type])

    @staticmethod
    def get_hash(obj):
        """Return a 32-hex-char shake_128 digest of the JSON-encoded object."""
        return hashlib.shake_128(json.dumps(obj).encode("utf-8")).hexdigest(16)

    @staticmethod
    def waitExchange(exchange, channel):
        """
        Wait passively until the exchange is declared.
        """
        while True:
            try:
                exchange.declare(passive=True, channel=channel)
                break
            except Exception:
                time.sleep(1)

    def waitProcess(self, mod, target):
        """Poll supervisord until process `mod` reaches state `target`."""
        state = self.supervisor.supervisor.getProcessInfo(mod)["state"]
        while state != target:
            time.sleep(0.5)
            state = self.supervisor.supervisor.getProcessInfo(mod)["state"]

    def validate_message(self, body, message):
        """
        Callback method for message validation from the queues.

        Compares the received event against the expected response for the
        current batch and checks the corresponding Redis state.
        """
        print(
            '\033[92mTest "{}" - Receiving Batch #{} - Type {} - Remaining {}'.format(
                self.curr_test,
                self.curr_idx,
                message.delivery_info["routing_key"],
                self.expected_messages - 1,
            )
        )
        if isinstance(body, dict):
            event = body
        else:
            event = json.loads(body)
        # distinguish between type of messages
        if message.delivery_info["routing_key"] == "update-update":
            expected = self.messages[self.curr_idx]["detection_update_response"]
            assert self.redis.exists(event["key"]), "Monitor key not found in Redis"
            if "peer_asn" in event:
                assert self.redis.sismember(
                    "peer-asns", event["peer_asn"]
                ), "Monitor/Peer ASN not found in Redis"
        elif message.delivery_info["routing_key"] == "update":
            expected = self.messages[self.curr_idx]["detection_hijack_response"]
            redis_hijack_key = Tester.redis_key(
                event["prefix"], event["hijack_as"], event["type"]
            )
            assert self.redis.exists(redis_hijack_key), "Hijack key not found in Redis"
        elif message.delivery_info["routing_key"] == "hijack-update":
            expected = self.messages[self.curr_idx]["database_hijack_response"]
            if event["active"]:
                assert self.redis.sismember(
                    "persistent-keys", event["key"]
                ), "Persistent key not found in Redis"
            else:
                assert not self.redis.sismember(
                    "persistent-keys", event["key"]
                ), "Persistent key found in Redis but should have been removed."
        # compare expected message with received one. exit on
        # mismatch.
        if isinstance(expected, list) and expected:
            expected_item = expected.pop(0)
        else:
            expected_item = expected
        for key in set(event.keys()).intersection(expected_item.keys()):
            if "time" in key:
                # Shift fixture-relative times to the test's anchor time.
                expected_item[key] += self.time_now
                # use unix timestamp instead of datetime objects
                if message.delivery_info["routing_key"] == "hijack-update":
                    event[key] = datetime.datetime(
                        *map(int, re.findall(r"\d+", event[key]))
                    ).timestamp()
            assert event[key] == expected_item[key] or (
                isinstance(event[key], (list, set))
                and set(event[key]) == set(expected_item[key])
            ), (
                'Test "{}" - Batch #{} - Type {}: Unexpected'
                ' value for key "{}". Received: {}, Expected: {}'.format(
                    self.curr_test,
                    self.curr_idx,
                    message.delivery_info["routing_key"],
                    key,
                    event[key],
                    expected_item[key],
                )
            )
        self.expected_messages -= 1
        if self.expected_messages <= 0:
            self.curr_idx += 1
        message.ack()

    def send_next_message(self, conn):
        """
        Publish next custom BGP update on the bgp-updates exchange.
        """
        with conn.Producer() as producer:
            # Count the responses the current batch is expected to produce.
            self.expected_messages = 0
            for key in self.messages[self.curr_idx]:
                if key != "send":
                    if isinstance(self.messages[self.curr_idx][key], dict):
                        self.expected_messages += 1
                    else:
                        self.expected_messages += len(self.messages[self.curr_idx][key])
            # offset to account for "real-time" tests
            for key in self.messages[self.curr_idx]["send"]:
                if "time" in key:
                    self.messages[self.curr_idx]["send"][key] += self.time_now
            producer.publish(
                self.messages[self.curr_idx]["send"],
                exchange=self.update_exchange,
                routing_key="update",
                serializer="ujson",
            )

    @staticmethod
    def config_request_rpc(conn):
        """
        Initial RPC of this service to request the configuration.
        The RPC is blocked until the configuration service replies back.
        """
        correlation_id = uuid()
        callback_queue = Queue(
            uuid(),
            channel=conn.default_channel,
            durable=False,
            auto_delete=True,
            max_priority=4,
            consumer_arguments={"x-priority": 4},
        )
        with conn.Producer() as producer:
            producer.publish(
                "",
                exchange="",
                routing_key="config-request-queue",
                reply_to=callback_queue.name,
                correlation_id=correlation_id,
                retry=True,
                declare=[
                    Queue(
                        "config-request-queue",
                        durable=False,
                        max_priority=4,
                        consumer_arguments={"x-priority": 4},
                    ),
                    callback_queue,
                ],
                priority=4,
                serializer="ujson",
            )
        while True:
            if callback_queue.get():
                break
            time.sleep(0.1)
        print("Config RPC finished")

    def test(self):
        """
        Loads a test file that includes crafted bgp updates as
        input and expected messages as output.
        """
        RABBITMQ_USER = os.getenv("RABBITMQ_USER", "guest")
        RABBITMQ_PASS = os.getenv("RABBITMQ_PASS", "<PASSWORD>")
        RABBITMQ_HOST = os.getenv("RABBITMQ_HOST", "rabbitmq")
        RABBITMQ_PORT = os.getenv("RABBITMQ_PORT", 5672)
        RABBITMQ_URI = "amqp://{}:{}@{}:{}//".format(
            RABBITMQ_USER, RABBITMQ_PASS, RABBITMQ_HOST, RABBITMQ_PORT
        )
        # exchanges
        self.update_exchange = Exchange(
            "bgp-update", type="direct", durable=False, delivery_mode=1
        )
        self.hijack_exchange = Exchange(
            "hijack-update", type="direct", durable=False, delivery_mode=1
        )
        self.pg_amq_bridge = Exchange(
            "amq.direct", type="direct", durable=True, delivery_mode=1
        )
        # queues
        self.update_queue = Queue(
            "detection-testing",
            exchange=self.pg_amq_bridge,
            routing_key="update-update",
            durable=False,
            auto_delete=True,
            max_priority=1,
            consumer_arguments={"x-priority": 1},
        )
        self.hijack_queue = Queue(
            "hijack-testing",
            exchange=self.hijack_exchange,
            routing_key="update",
            durable=False,
            auto_delete=True,
            max_priority=1,
            consumer_arguments={"x-priority": 1},
        )
        self.hijack_db_queue = Queue(
            "hijack-db-testing",
            exchange=self.pg_amq_bridge,
            routing_key="hijack-update",
            durable=False,
            auto_delete=True,
            max_priority=1,
            consumer_arguments={"x-priority": 1},
        )
        with Connection(RABBITMQ_URI) as connection:
            print("Waiting for pg_amq exchange..")
            Tester.waitExchange(self.pg_amq_bridge, connection.default_channel)
            print("Waiting for hijack exchange..")
            Tester.waitExchange(self.hijack_exchange, connection.default_channel)
            print("Waiting for update exchange..")
            Tester.waitExchange(self.update_exchange, connection.default_channel)
            # query database for the states of the processes
            db_con = self.getDbConnection()
            db_cur = db_con.cursor()
            query = "SELECT name FROM process_states WHERE running=True"
            running_modules = set()
            # wait until all 6 modules are running
            while len(running_modules) < 6:
                db_cur.execute(query)
                entries = db_cur.fetchall()
                for entry in entries:
                    running_modules.add(entry[0])
                db_con.commit()
                print("Running modules: {}".format(running_modules))
                print("{}/6 modules are running.".format(len(running_modules)))
                time.sleep(1)
            Tester.config_request_rpc(connection)
            time.sleep(10)
            # call all helper functions
            Helper.hijack_resolve(
                db_con, connection, "a", "172.16.58.3/24", "S|0|-|-", 133720
            )
            Helper.hijack_mitigate(db_con, connection, "b", "10.91.236.0/24")
            Helper.hijack_ignore(
                db_con, connection, "c", "172.16.58.3/24", "S|0|-|-", 136334
            )
            Helper.hijack_comment(db_con, connection, "d", "test")
            Helper.hijack_ack(db_con, connection, "e", "true")
            Helper.hijack_multiple_action(
                db_con, connection, ["f", "g"], "hijack_action_acknowledge"
            )
            Helper.hijack_multiple_action(
                db_con, connection, ["f", "g"], "hijack_action_acknowledge_not"
            )
            Helper.hijack_multiple_action(
                db_con, connection, ["f"], "hijack_action_resolve"
            )
            Helper.hijack_multiple_action(
                db_con, connection, ["g"], "hijack_action_ignore"
            )
            # multi-action delete a hijack purged from cache
            Helper.hijack_multiple_action(
                db_con, connection, ["f"], "hijack_action_delete"
            )
            # delete a hijack purged from cache
            Helper.hijack_delete(
                db_con, connection, "g", "172.16.31.10/22", "S|0|-|-", 133676
            )
            # multi-action delete a hijack using cache
            Helper.hijack_multiple_action(
                db_con, connection, ["h"], "hijack_action_delete"
            )
            # delete a hijack using cache
            Helper.hijack_delete(
                db_con, connection, "i", "172.16.31.10/24", "S|0|-|-", 133720
            )
            Helper.hijack_mitigate(db_con, connection, "j", "2001:db8:abcd:12::0/80")
            Helper.load_as_sets(connection)
            time.sleep(10)
            db_cur.close()
            db_con.close()
            for testfile in os.listdir("testfiles/"):
                self.clear()
                self.curr_test = testfile
                self.messages = {}
                # load test
                with open("testfiles/{}".format(testfile), "r") as f:
                    self.messages = json.load(f)
                send_len = len(self.messages)
                with nested(
                    connection.Consumer(
                        self.hijack_queue,
                        callbacks=[self.validate_message],
                        accept=["ujson"],
                    ),
                    connection.Consumer(
                        self.update_queue,
                        callbacks=[self.validate_message],
                        accept=["ujson", "txtjson"],
                    ),
                    connection.Consumer(
                        self.hijack_db_queue,
                        callbacks=[self.validate_message],
                        accept=["ujson", "txtjson"],
                    ),
                ):
                    send_cnt = 0
                    # send and validate all messages in the messages.json file
                    while send_cnt < send_len:
                        self.curr_idx = send_cnt
                        self.send_next_message(connection)
                        send_cnt += 1
                        # sleep until we receive all expected messages
                        while self.curr_idx != send_cnt:
                            time.sleep(0.1)
                            try:
                                connection.drain_events(timeout=10)
                            except socket.timeout:
                                # avoid infinite loop by timeout; BUG FIX:
                                # raise explicitly instead of `assert False`
                                # so the guard survives `python -O`.
                                raise AssertionError("Consumer timeout")
            connection.close()
            with open("configs/config.yaml") as f1, open("configs/config2.yaml") as f2:
                new_data = f2.read()
                old_data = f1.read()
                Helper.change_conf(connection, new_data, old_data, "test")
            time.sleep(5)
            # Shut down the whole pipeline and wait for every module to stop.
            self.supervisor.supervisor.stopAllProcesses()
            self.waitProcess("listener", 0)  # 0 STOPPED
            self.waitProcess("clock", 0)  # 0 STOPPED
            self.waitProcess("detection", 0)  # 0 STOPPED
            self.waitProcess("mitigation", 0)  # 0 STOPPED
            self.waitProcess("configuration", 0)  # 0 STOPPED
            self.waitProcess("database", 0)  # 0 STOPPED
            self.waitProcess("observer", 0)  # 0 STOPPED
class Helper:
@staticmethod
def hijack_resolve(db_con, connection, hijack_key, prefix, type_, hijack_as):
hijack_exchange = Exchange(
"hijack-update", type="direct", durable=False, delivery_mode=1
)
with connection.Producer() as producer:
producer.publish(
{
"key": hijack_key,
"prefix": prefix,
"type": type_,
"hijack_as": hijack_as,
},
exchange=hijack_exchange,
routing_key="resolve",
priority=2,
serializer="ujson",
)
result = hijack_action_test_result(db_con, hijack_key, "resolved")
assert (
result is True
), 'Action "hijack_resolve" for hijack id #{0} failed'.format(hijack_key)
@staticmethod
def hijack_mitigate(db_con, connection, hijack_key, prefix):
mitigation_exchange = Exchange(
"mitigation", type="direct", durable=False, delivery_mode=1
)
with connection.Producer() as producer:
producer.publish(
{"key": hijack_key, "prefix": prefix},
exchange=mitigation_exchange,
routing_key="mitigate",
priority=2,
serializer="ujson",
)
result = hijack_action_test_result(db_con, hijack_key, "under_mitigation")
assert (
result is True
), 'Action "hijack_mitigate" for hijack id #{0} failed'.format(hijack_key)
@staticmethod
def hijack_ignore(db_con, connection, hijack_key, prefix, type_, hijack_as):
hijack_exchange = Exchange(
"hijack-update", type="direct", durable=False, delivery_mode=1
)
with connection.Producer() as producer:
producer.publish(
{
"key": hijack_key,
"prefix": prefix,
"type": type_,
"hijack_as": hijack_as,
},
exchange=hijack_exchange,
routing_key="ignore",
priority=2,
serializer="ujson",
)
result = hijack_action_test_result(db_con, hijack_key, "ignored")
assert (
result is True
), 'Action "hijack_ignore" for hijack id #{0} failed'.format(hijack_key)
@staticmethod
def hijack_comment(db_con, connection, hijack_key, comment):
correlation_id = uuid()
callback_queue = Queue(
uuid(),
channel=connection.default_channel,
durable=False,
exclusive=True,
auto_delete=True,
max_priority=4,
consumer_arguments={"x-priority": 4},
)
with connection.Producer() as producer:
producer.publish(
{"key": hijack_key, "comment": | |
between client
pings. If a client sends pings more frequently the server will disconnect
from the client.
:param str server_interval: (optional) The time between pings to clients.
:param str server_timeout: (optional) The duration the server will wait for
a response from a client before closing the connection.
"""
self.server_min_interval = server_min_interval
self.server_interval = server_interval
self.server_timeout = server_timeout
@classmethod
def from_dict(cls, _dict: Dict) -> 'ConfigOrdererKeepalive':
"""Initialize a ConfigOrdererKeepalive object from a json dictionary."""
args = {}
if 'ServerMinInterval' in _dict:
args['server_min_interval'] = _dict.get('ServerMinInterval')
if 'ServerInterval' in _dict:
args['server_interval'] = _dict.get('ServerInterval')
if 'ServerTimeout' in _dict:
args['server_timeout'] = _dict.get('ServerTimeout')
return cls(**args)
    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ConfigOrdererKeepalive object from a json dictionary."""
        # Thin private alias for from_dict; presumably kept for older
        # generated-SDK callers — TODO confirm before removing.
        return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'server_min_interval') and self.server_min_interval is not None:
_dict['ServerMinInterval'] = self.server_min_interval
if hasattr(self, 'server_interval') and self.server_interval is not None:
_dict['ServerInterval'] = self.server_interval
if hasattr(self, 'server_timeout') and self.server_timeout is not None:
_dict['ServerTimeout'] = self.server_timeout
return _dict
    def _to_dict(self):
        """Return a json dictionary representing this model."""
        # Thin private alias for to_dict; presumably kept for older
        # generated-SDK callers — TODO confirm before removing.
        return self.to_dict()
    def __str__(self) -> str:
        """Return a `str` version of this ConfigOrdererKeepalive object."""
        # Pretty-printed JSON of the serialized model (2-space indent).
        return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ConfigOrdererKeepalive') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
    def __ne__(self, other: 'ConfigOrdererKeepalive') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        # Explicit inverse of __eq__ (required on Python 2; harmless on 3).
        return not self == other
class ConfigOrdererMetrics():
    """
    ConfigOrdererMetrics.

    :attr str provider: (optional) The metrics provider to use.
    :attr ConfigOrdererMetricsStatsd statsd: (optional) The statsd configuration.
    """

    def __init__(self,
                 *,
                 provider: str = None,
                 statsd: 'ConfigOrdererMetricsStatsd' = None) -> None:
        """
        Initialize a ConfigOrdererMetrics object.

        :param str provider: (optional) The metrics provider to use.
        :param ConfigOrdererMetricsStatsd statsd: (optional) The statsd
               configuration.
        """
        self.provider = provider
        self.statsd = statsd

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'ConfigOrdererMetrics':
        """Initialize a ConfigOrdererMetrics object from a json dictionary."""
        kwargs = {}
        if 'Provider' in _dict:
            kwargs['provider'] = _dict['Provider']
        if 'Statsd' in _dict:
            # Nested model: deserialize through its own from_dict.
            kwargs['statsd'] = ConfigOrdererMetricsStatsd.from_dict(_dict['Statsd'])
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ConfigOrdererMetrics object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        provider = getattr(self, 'provider', None)
        if provider is not None:
            serialized['Provider'] = provider
        statsd = getattr(self, 'statsd', None)
        if statsd is not None:
            # Nested model: serialize through its own to_dict.
            serialized['Statsd'] = statsd.to_dict()
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this ConfigOrdererMetrics object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'ConfigOrdererMetrics') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'ConfigOrdererMetrics') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other

    class ProviderEnum(str, Enum):
        """
        The metrics provider to use.
        """
        DISABLED = 'disabled'
        STATSD = 'statsd'
        PROMETHEUS = 'prometheus'
class ConfigPeerChaincodeExternalBuildersItem():
    """
    ConfigPeerChaincodeExternalBuildersItem.

    :attr str path: (optional) The path to a build directory.
    :attr str name: (optional) The name of this builder.
    :attr List[str] environment_whitelist: (optional)
    """

    def __init__(self,
                 *,
                 path: str = None,
                 name: str = None,
                 environment_whitelist: List[str] = None) -> None:
        """
        Initialize a ConfigPeerChaincodeExternalBuildersItem object.

        :param str path: (optional) The path to a build directory.
        :param str name: (optional) The name of this builder.
        :param List[str] environment_whitelist: (optional)
        """
        self.path = path
        self.name = name
        self.environment_whitelist = environment_whitelist

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'ConfigPeerChaincodeExternalBuildersItem':
        """Initialize a ConfigPeerChaincodeExternalBuildersItem object from a json dictionary."""
        # JSON key -> constructor keyword; only keys actually present are passed.
        mapping = (
            ('path', 'path'),
            ('name', 'name'),
            ('environmentWhitelist', 'environment_whitelist'),
        )
        kwargs = {attr: _dict[key] for key, attr in mapping if key in _dict}
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ConfigPeerChaincodeExternalBuildersItem object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        # Attribute -> JSON key; unset (None/missing) attributes are omitted.
        for attr, key in (('path', 'path'),
                          ('name', 'name'),
                          ('environment_whitelist', 'environmentWhitelist')):
            value = getattr(self, attr, None)
            if value is not None:
                serialized[key] = value
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this ConfigPeerChaincodeExternalBuildersItem object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'ConfigPeerChaincodeExternalBuildersItem') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'ConfigPeerChaincodeExternalBuildersItem') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class ConfigPeerChaincodeGolang():
    """
    ConfigPeerChaincodeGolang.

    :attr bool dynamic_link: (optional) Controls if golang chaincode should be built
          with dynamic linking or static linking. Defaults `false` (static).
    """

    def __init__(self,
                 *,
                 dynamic_link: bool = None) -> None:
        """
        Initialize a ConfigPeerChaincodeGolang object.

        :param bool dynamic_link: (optional) Controls if golang chaincode should be
               built with dynamic linking or static linking. Defaults `false` (static).
        """
        self.dynamic_link = dynamic_link

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'ConfigPeerChaincodeGolang':
        """Initialize a ConfigPeerChaincodeGolang object from a json dictionary."""
        kwargs = {}
        if 'dynamicLink' in _dict:
            kwargs['dynamic_link'] = _dict['dynamicLink']
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ConfigPeerChaincodeGolang object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        value = getattr(self, 'dynamic_link', None)
        if value is not None:
            serialized['dynamicLink'] = value
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this ConfigPeerChaincodeGolang object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'ConfigPeerChaincodeGolang') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'ConfigPeerChaincodeGolang') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class ConfigPeerChaincodeLogging():
"""
ConfigPeerChaincodeLogging.
:attr str level: (optional) Default logging level for loggers within chaincode
containers.
:attr str shim: (optional) Override default level for the 'shim' logger.
:attr str format: (optional) Override the default log format for chaincode
container logs.
"""
    def __init__(self,
                 *,
                 level: str = None,
                 shim: str = None,
                 format: str = None) -> None:
        """
        Initialize a ConfigPeerChaincodeLogging object.

        :param str level: (optional) Default logging level for loggers within
        chaincode containers.
        :param str shim: (optional) Override default level for the 'shim' logger.
        :param str format: (optional) Override the default log format for chaincode
        container logs.
        """
        # NOTE: the `format` parameter deliberately mirrors the JSON field name
        # and therefore shadows the builtin `format` within this scope.
        self.level = level
        self.shim = shim
        self.format = format
@classmethod
def from_dict(cls, _dict: Dict) -> 'ConfigPeerChaincodeLogging':
"""Initialize a ConfigPeerChaincodeLogging object from a json dictionary."""
args = {}
if 'level' in _dict:
args['level'] = _dict.get('level')
if 'shim' in _dict:
args['shim'] = _dict.get('shim')
if 'format' in _dict:
args['format'] = _dict.get('format')
return cls(**args)
    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ConfigPeerChaincodeLogging object from a json dictionary."""
        # Thin private alias for from_dict; presumably kept for older
        # generated-SDK callers — TODO confirm before removing.
        return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'level') and self.level is not None:
_dict['level'] = self.level
if hasattr(self, 'shim') and self.shim is not None:
_dict['shim'] = self.shim
if hasattr(self, 'format') and self.format is not None:
_dict['format'] = self.format
return _dict
    def _to_dict(self):
        """Return a json dictionary representing this model."""
        # Thin private alias for to_dict; presumably kept for older
        # generated-SDK callers — TODO confirm before removing.
        return self.to_dict()
    def __str__(self) -> str:
        """Return a `str` version of this ConfigPeerChaincodeLogging object."""
        # Pretty-printed JSON of the serialized model (2-space indent).
        return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ConfigPeerChaincodeLogging') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
    def __ne__(self, other: 'ConfigPeerChaincodeLogging') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        # Explicit inverse of __eq__ (required on Python 2; harmless on 3).
        return not self == other
    class LevelEnum(str, Enum):
        """
        Default logging level for loggers within chaincode containers.
        """
        # str-mixin Enum: members compare equal to their plain string values,
        # so they serialize transparently into the JSON config.
        FATAL = 'fatal'
        PANIC = 'panic'
        ERROR = 'error'
        WARNING = 'warning'
        INFO = 'info'
        DEBUG = 'debug'
class ShimEnum(str, Enum):
"""
Override default level for the 'shim' logger.
"""
FATAL = 'fatal'
PANIC = 'panic'
ERROR = 'error'
WARNING = 'warning'
| |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Filter Scheduler.
"""
import mox
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor import api as conductor_api
from nova import context
from nova import db
from nova import exception
from nova.scheduler import driver
from nova.scheduler import filter_scheduler
from nova.scheduler import host_manager
from nova.scheduler import weights
from nova.tests.scheduler import fakes
from nova.tests.scheduler import test_scheduler
def fake_get_filtered_hosts(hosts, filter_properties):
    """Stub for HostManager.get_filtered_hosts that filters nothing and
    returns every candidate host as a list."""
    all_hosts = list(hosts)
    return all_hosts
def fake_get_group_filtered_hosts(hosts, filter_properties):
    """Stub group-anti-affinity filter: when the 'group_hosts' hint is
    non-empty, drop the first candidate host; otherwise pass all through."""
    candidates = list(hosts)
    if filter_properties.get('group_hosts'):
        # Simulate one host being excluded by the group filter.
        candidates.pop(0)
    return candidates
class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
"""Test case for Filter Scheduler."""
driver_cls = filter_scheduler.FilterScheduler
    def test_run_instance_no_hosts(self):
        """With no compute nodes available, schedule_run_instance must set the
        instance to ERROR and record a NoValidHost fault instead of raising."""
        def _fake_empty_call_zone_method(*args, **kwargs):
            # NOTE(review): defined but never used in this test; left in place
            # to avoid disturbing the recorded mox expectations.
            return []
        sched = fakes.FakeFilterScheduler()
        uuid = 'fake-uuid1'
        fake_context = context.RequestContext('user', 'project')
        instance_properties = {'project_id': 1, 'os_type': 'Linux'}
        request_spec = {'instance_type': {'memory_mb': 1, 'root_gb': 1,
                                          'ephemeral_gb': 0},
                        'instance_properties': instance_properties,
                        'instance_uuids': [uuid]}
        self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        # Expect the instance to be flipped to ERROR with no task state.
        old_ref, new_ref = db.instance_update_and_get_original(fake_context,
                uuid, {'vm_state': vm_states.ERROR, 'task_state':
                       None}).AndReturn(({}, {}))
        compute_utils.add_instance_fault_from_exc(fake_context,
                mox.IsA(conductor_api.LocalAPI), new_ref,
                mox.IsA(exception.NoValidHost), mox.IgnoreArg())
        self.mox.StubOutWithMock(db, 'compute_node_get_all')
        # Empty compute-node list => scheduler finds no valid host.
        db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
        self.mox.ReplayAll()
        sched.schedule_run_instance(
                fake_context, request_spec, None, None, None, None, {})
    def test_run_instance_non_admin(self):
        """Host-state lookup must run with an elevated (admin) context even
        when scheduling was requested with a plain user context."""
        self.was_admin = False
        def fake_get(context, *args, **kwargs):
            # make sure this is called with admin context, even though
            # we're using user context below
            self.was_admin = context.is_admin
            return {}
        sched = fakes.FakeFilterScheduler()
        self.stubs.Set(sched.host_manager, 'get_all_host_states', fake_get)
        fake_context = context.RequestContext('user', 'project')
        uuid = 'fake-uuid1'
        instance_properties = {'project_id': 1, 'os_type': 'Linux'}
        request_spec = {'instance_type': {'memory_mb': 1, 'local_gb': 1},
                        'instance_properties': instance_properties,
                        'instance_uuids': [uuid]}
        self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        # fake_get returns no hosts, so the NoValidHost error path is expected.
        old_ref, new_ref = db.instance_update_and_get_original(fake_context,
                uuid, {'vm_state': vm_states.ERROR, 'task_state':
                       None}).AndReturn(({}, {}))
        compute_utils.add_instance_fault_from_exc(fake_context,
                mox.IsA(conductor_api.LocalAPI), new_ref,
                mox.IsA(exception.NoValidHost), mox.IgnoreArg())
        self.mox.ReplayAll()
        sched.schedule_run_instance(
                fake_context, request_spec, None, None, None, None, {})
        self.assertTrue(self.was_admin)
    def test_scheduler_includes_launch_index(self):
        """Each instance provisioned from a multi-instance request must carry a
        monotonically increasing launch_index (0, 1, ...)."""
        fake_context = context.RequestContext('user', 'project')
        instance_opts = {'fake_opt1': 'meow'}
        request_spec = {'instance_uuids': ['fake-uuid1', 'fake-uuid2'],
                        'instance_properties': instance_opts}
        instance1 = {'uuid': 'fake-uuid1'}
        instance2 = {'uuid': 'fake-uuid2'}
        def _has_launch_index(expected_index):
            """Return a function that verifies the expected index."""
            def _check_launch_index(value):
                if 'instance_properties' in value:
                    if 'launch_index' in value['instance_properties']:
                        index = value['instance_properties']['launch_index']
                        if index == expected_index:
                            return True
                return False
            return _check_launch_index
        self.mox.StubOutWithMock(self.driver, '_schedule')
        self.mox.StubOutWithMock(self.driver, '_provision_resource')
        self.driver._schedule(fake_context, request_spec, {},
                ['fake-uuid1', 'fake-uuid2']).AndReturn(['host1', 'host2'])
        # instance 1
        self.driver._provision_resource(
                fake_context, 'host1',
                mox.Func(_has_launch_index(0)), {},
                None, None, None, None,
                instance_uuid='fake-uuid1').AndReturn(instance1)
        # instance 2
        self.driver._provision_resource(
                fake_context, 'host2',
                mox.Func(_has_launch_index(1)), {},
                None, None, None, None,
                instance_uuid='fake-uuid2').AndReturn(instance2)
        self.mox.ReplayAll()
        self.driver.schedule_run_instance(fake_context, request_spec,
                None, None, None, None, {})
    def test_schedule_happy_day(self):
        """Make sure there's nothing glaringly wrong with _schedule()
        by doing a happy day pass through.
        """
        self.next_weight = 1.0
        def _fake_weigh_objects(_self, functions, hosts, options):
            # Hand out strictly increasing weights so host ordering is
            # deterministic for the assertions below.
            self.next_weight += 2.0
            host_state = hosts[0]
            return [weights.WeighedHost(host_state, self.next_weight)]
        sched = fakes.FakeFilterScheduler()
        fake_context = context.RequestContext('user', 'project',
                is_admin=True)
        self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
                fake_get_filtered_hosts)
        self.stubs.Set(weights.HostWeightHandler,
                'get_weighed_objects', _fake_weigh_objects)
        fakes.mox_host_manager_db_calls(self.mox, fake_context)
        request_spec = {'num_instances': 10,
                        'instance_type': {'memory_mb': 512, 'root_gb': 512,
                                          'ephemeral_gb': 0,
                                          'vcpus': 1},
                        'instance_properties': {'project_id': 1,
                                                'root_gb': 512,
                                                'memory_mb': 512,
                                                'ephemeral_gb': 0,
                                                'vcpus': 1,
                                                'os_type': 'Linux'}}
        self.mox.ReplayAll()
        weighed_hosts = sched._schedule(fake_context, request_spec, {})
        # One weighed host per requested instance.
        self.assertEquals(len(weighed_hosts), 10)
        for weighed_host in weighed_hosts:
            self.assertTrue(weighed_host.obj is not None)
    def test_schedule_prep_resize_doesnt_update_host(self):
        """Scheduling a resize must not write the newly chosen host onto the
        instance record; that is the destination compute manager's job."""
        fake_context = context.RequestContext('user', 'project',
                is_admin=True)
        sched = fakes.FakeFilterScheduler()
        def _return_hosts(*args, **kwargs):
            host_state = host_manager.HostState('host2', 'node2')
            return [weights.WeighedHost(host_state, 1.0)]
        self.stubs.Set(sched, '_schedule', _return_hosts)
        info = {'called': 0}
        def _fake_instance_update_db(*args, **kwargs):
            # This should not be called
            info['called'] = 1
        self.stubs.Set(driver, 'instance_update_db',
                _fake_instance_update_db)
        instance = {'uuid': 'fake-uuid', 'host': 'host1'}
        sched.schedule_prep_resize(fake_context, {}, {}, {},
                instance, {}, None)
        self.assertEqual(info['called'], 0)
    def test_max_attempts(self):
        """The scheduler_max_attempts flag is surfaced via _max_attempts()."""
        self.flags(scheduler_max_attempts=4)
        sched = fakes.FakeFilterScheduler()
        self.assertEqual(4, sched._max_attempts())
    def test_invalid_max_attempts(self):
        """A non-positive scheduler_max_attempts is a configuration error."""
        self.flags(scheduler_max_attempts=0)
        sched = fakes.FakeFilterScheduler()
        self.assertRaises(exception.NovaException, sched._max_attempts)
    def test_retry_disabled(self):
        """scheduler_max_attempts=1 disables re-scheduling entirely."""
        # Retry info should not get populated when re-scheduling is off.
        self.flags(scheduler_max_attempts=1)
        sched = fakes.FakeFilterScheduler()
        instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
        request_spec = dict(instance_properties=instance_properties)
        filter_properties = {}
        self.mox.StubOutWithMock(db, 'compute_node_get_all')
        db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
        self.mox.ReplayAll()
        sched._schedule(self.context, request_spec,
                filter_properties=filter_properties)
        # should not have retry info in the populated filter properties:
        self.assertFalse("retry" in filter_properties)
    def test_retry_attempt_one(self):
        """First pass with retries enabled records num_attempts == 1."""
        # Test retry logic on initial scheduling attempt.
        self.flags(scheduler_max_attempts=2)
        sched = fakes.FakeFilterScheduler()
        instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
        request_spec = dict(instance_properties=instance_properties)
        filter_properties = {}
        self.mox.StubOutWithMock(db, 'compute_node_get_all')
        db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
        self.mox.ReplayAll()
        sched._schedule(self.context, request_spec,
                filter_properties=filter_properties)
        num_attempts = filter_properties['retry']['num_attempts']
        self.assertEqual(1, num_attempts)
    def test_retry_attempt_two(self):
        """A re-schedule increments the existing retry num_attempts to 2."""
        # Test retry logic when re-scheduling.
        self.flags(scheduler_max_attempts=2)
        sched = fakes.FakeFilterScheduler()
        instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
        request_spec = dict(instance_properties=instance_properties)
        retry = dict(num_attempts=1)
        filter_properties = dict(retry=retry)
        self.mox.StubOutWithMock(db, 'compute_node_get_all')
        db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
        self.mox.ReplayAll()
        sched._schedule(self.context, request_spec,
                filter_properties=filter_properties)
        num_attempts = filter_properties['retry']['num_attempts']
        self.assertEqual(2, num_attempts)
def test_retry_exceeded_max_attempts(self):
# Test for necessary explosion when max retries is exceeded and that
# the information needed in request_spec is still present for error
# handling
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
instance_uuids = ['fake-id']
request_spec = dict(instance_properties=instance_properties,
instance_uuids=instance_uuids)
retry = dict(num_attempts=2)
filter_properties = dict(retry=retry)
self.assertRaises(exception.NoValidHost, sched.schedule_run_instance,
self.context, request_spec, admin_password=<PASSWORD>,
injected_files=None, requested_networks=None,
is_first_time=False,
filter_properties=filter_properties)
uuids = request_spec.get('instance_uuids')
self.assertEqual(uuids, instance_uuids)
    def test_add_retry_host(self):
        """_add_retry_host appends a [host, node] pair to retry['hosts']."""
        retry = dict(num_attempts=1, hosts=[])
        filter_properties = dict(retry=retry)
        host = "fakehost"
        node = "fakenode"
        sched = fakes.FakeFilterScheduler()
        sched._add_retry_host(filter_properties, host, node)
        hosts = filter_properties['retry']['hosts']
        self.assertEqual(1, len(hosts))
        self.assertEqual([host, node], hosts[0])
    def test_post_select_populate(self):
        """After a node is selected, the retry host list and the host's
        resource limits must both be populated."""
        # Test addition of certain filter props after a node is selected.
        retry = {'hosts': [], 'num_attempts': 1}
        filter_properties = {'retry': retry}
        sched = fakes.FakeFilterScheduler()
        host_state = host_manager.HostState('host', 'node')
        host_state.limits['vcpus'] = 5
        sched._post_select_populate_filter_properties(filter_properties,
                host_state)
        self.assertEqual(['host', 'node'],
                filter_properties['retry']['hosts'][0])
        self.assertEqual({'vcpus': 5}, host_state.limits)
    def test_prep_resize_post_populates_retry(self):
        """schedule_prep_resize must append the chosen (host, node) pair to
        the retry dict so a failed resize is not retried on the same node."""
        # Prep resize should add a ('host', 'node') entry to the retry dict.
        sched = fakes.FakeFilterScheduler()
        image = 'image'
        instance = {'disable_terminate': False,
                    'uuid': 'fakeuuid',
                    'deleted': 0, 'info_cache': {},
                    'created_at': None,
                    'system_metadata': [], 'shutdown_terminate': False,
                    'id': 1, 'security_groups': [], 'metadata': []}
        instance_properties = {'project_id': 'fake', 'os_type': 'Linux'}
        instance_type = {
            'memory_mb': 1024, 'root_gb': 40, 'deleted_at': None,
            'name': u'm1.medium', 'deleted': 0, 'created_at': None,
            'ephemeral_gb': 0, 'updated_at': None, 'disabled': False,
            'vcpus': 2, 'extra_specs': {}, 'swap': 0,
            'rxtx_factor': 1.0, 'is_public': True, 'flavorid': u'3',
            'vcpu_weight': None, 'id': 1}
        request_spec = {'instance_properties': instance_properties,
                        'instance_type': instance_type}
        retry = {'hosts': [], 'num_attempts': 1}
        filter_properties = {'retry': retry}
        reservations = None
        host = fakes.FakeHostState('host', 'node', {})
        weighed_host = weights.WeighedHost(host, 1)
        weighed_hosts = [weighed_host]
        self.mox.StubOutWithMock(sched, '_schedule')
        self.mox.StubOutWithMock(sched.compute_rpcapi, 'prep_resize')
        sched._schedule(self.context, request_spec, filter_properties,
                [instance['uuid']]).AndReturn(weighed_hosts)
        sched.compute_rpcapi.prep_resize(self.context, image, instance,
                instance_type, 'host', reservations, request_spec=request_spec,
                filter_properties=filter_properties, node='node')
        self.mox.ReplayAll()
        sched.schedule_prep_resize(self.context, image, request_spec,
                filter_properties, instance, instance_type, reservations)
        self.assertEqual([['host', 'node']],
                filter_properties['retry']['hosts'])
def test_basic_schedule_run_instances_anti_affinity(self):
filter_properties = {'scheduler_hints':
{'group': 'cats'}}
# Request spec 1
instance_opts1 = {'project_id': 1, 'os_type': 'Linux',
'memory_mb': 512, 'root_gb': 512,
'ephemeral_gb': 0, 'vcpus': 1,
'system_metadata': {'system': 'metadata'}}
request_spec1 = {'instance_uuids': ['fake-uuid1-1', 'fake-uuid1-2'],
'instance_properties': instance_opts1,
'instance_type': {'memory_mb': 512, 'root_gb': 512,
'ephemeral_gb': 0, 'vcpus': 1}}
self.next_weight = 1.0
def _fake_weigh_objects(_self, functions, hosts, options):
self.next_weight += 2.0
host_state = hosts[0]
return [weights.WeighedHost(host_state, self.next_weight)]
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project',
is_admin=True)
self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
fake_get_group_filtered_hosts)
self.stubs.Set(weights.HostWeightHandler,
'get_weighed_objects', _fake_weigh_objects)
fakes.mox_host_manager_db_calls(self.mox, fake_context)
self.mox.StubOutWithMock(driver, 'instance_update_db')
self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI, 'run_instance')
self.mox.StubOutWithMock(sched, 'group_hosts')
instance1_1 = {'uuid': 'fake-uuid1-1'}
instance1_2 = {'uuid': 'fake-uuid1-2'}
sched.group_hosts(mox.IgnoreArg(), 'cats').AndReturn([])
def inc_launch_index1(*args, **kwargs):
request_spec1['instance_properties']['launch_index'] = (
request_spec1['instance_properties']['launch_index'] + 1)
expected_metadata = {'system_metadata':
{'system': 'metadata', 'group': 'cats'}}
driver.instance_update_db(fake_context, instance1_1['uuid'],
extra_values=expected_metadata).WithSideEffects(
inc_launch_index1).AndReturn(instance1_1)
compute_rpcapi.ComputeAPI.run_instance(fake_context, host='host3',
instance=instance1_1, requested_networks=None,
injected_files=None, admin_password=<PASSWORD>, is_first_time=None,
request_spec=request_spec1, filter_properties=mox.IgnoreArg(),
node='node3')
driver.instance_update_db(fake_context, instance1_2['uuid'],
extra_values=expected_metadata).WithSideEffects(
inc_launch_index1).AndReturn(instance1_2)
compute_rpcapi.ComputeAPI.run_instance(fake_context, host='host4',
instance=instance1_2, requested_networks=None,
injected_files=None, admin_password=<PASSWORD>, is_first_time=None,
request_spec=request_spec1, filter_properties=mox.IgnoreArg(),
node='node4')
self.mox.ReplayAll()
sched.schedule_run_instance(fake_context, request_spec1,
None, None, None, None, filter_properties)
    def test_schedule_host_pool(self):
        """Make sure the scheduler_host_subset_size property works properly."""
        self.flags(scheduler_host_subset_size=2)
        sched = fakes.FakeFilterScheduler()
        fake_context = context.RequestContext('user', 'project',
                is_admin=True)
        self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
                fake_get_filtered_hosts)
        fakes.mox_host_manager_db_calls(self.mox, fake_context)
        instance_properties = {'project_id': 1,
                               'root_gb': 512,
                               'memory_mb': 512,
                               'ephemeral_gb': 0,
                               'vcpus': 1,
                               'os_type': 'Linux'}
        request_spec = dict(instance_properties=instance_properties)
        filter_properties = {}
        self.mox.ReplayAll()
        hosts = sched._schedule(self.context, request_spec,
                filter_properties=filter_properties)
        # one host should be chosen: the subset size only widens the random
        # pick, it does not change how many hosts a single instance needs
        self.assertEqual(len(hosts), 1)
    def test_schedule_large_host_pool(self):
        """Hosts should still be chosen if pool size
        is larger than number of filtered hosts.
        """
        sched = fakes.FakeFilterScheduler()
        fake_context = context.RequestContext('user', 'project',
                is_admin=True)
        # Subset size (20) deliberately exceeds the number of fake hosts.
        self.flags(scheduler_host_subset_size=20)
        self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
                fake_get_filtered_hosts)
        fakes.mox_host_manager_db_calls(self.mox, fake_context)
        instance_properties = {'project_id': 1,
                               'root_gb': 512,
                               'memory_mb': 512,
                               'ephemeral_gb': 0,
                               'vcpus': 1,
                               'os_type': 'Linux'}
        request_spec = dict(instance_properties=instance_properties)
        filter_properties = {}
        self.mox.ReplayAll()
        hosts = sched._schedule(self.context, request_spec,
                filter_properties=filter_properties)
        # one host should be chosen
        self.assertEqual(len(hosts), 1)
def test_schedule_chooses_best_host(self):
"""If scheduler_host_subset_size is 1, the largest host with greatest
weight should be | |
from collections import OrderedDict
from copy import deepcopy
import time
import torch
import torch.nn as nn
import torch.nn.functional as f
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
import numpy as np
class FFNN(nn.Module):
    """
    Feedforward neural network for modelling (chaotic) time series data.
    (currently only works for 1-dimensional data e.g. MackeyGlass).

    Args:
        input_size: Number of frames of context (data for previous time steps).
                    (not to be confused with data dimensionality).
        hidden_size: Number of hidden units per hidden layer.
        n_hidden_layers: Number of hidden layers (not including input+output layers).
        activation: PyTorch activation (class, NOT an instance)
    """
    def __init__(self, input_size, hidden_size, n_hidden_layers, activation=None):
        super(FFNN, self).__init__()
        self.input_size = int(input_size)
        # BUG FIX: was `int(input_size)`, which misreported the layer width to
        # any code reading self.hidden_size (the layers themselves were built
        # from the correct local `hidden_size` and are unaffected).
        self.hidden_size = int(hidden_size)
        self.n_hidden_layers = int(n_hidden_layers)
        if activation is None:
            activation = nn.Sigmoid
        else:
            assert type(activation) == type, "Pass the TYPE of activation, not an instance of it."
        # e.g. "<class 'torch.nn.modules.activation.Sigmoid" -> used for labels.
        self.activ_str = str(activation)[:-2]
        layers = OrderedDict()
        layers['linear1'] = nn.Linear(input_size, hidden_size)  # input layer
        layers['activ1'] = activation()
        for i in range(2, n_hidden_layers + 2):
            # hidden layers, numbered linear2..linear(n+1)
            layers['linear%d' % i] = nn.Linear(hidden_size, hidden_size)
            layers['activ%d' % i] = activation()
        # output layer: scalar prediction for the next time step
        layers['linear%d' % (n_hidden_layers + 2)] = nn.Linear(hidden_size, 1)
        self.model = nn.Sequential(layers)

    def forward(self, x):
        """Map a (batch, input_size) context window to a (batch, 1) prediction."""
        return self.model(x)
def train(model, train_data, batch_size, num_epochs, criterion, optimizer, valid_data=None,
          verbose=1, eval_gen_loss=False, n_generate_timesteps=2000):
    """Train ``model`` on a scalar time series with one-step-ahead targets.

    Each training example is ``input_size`` consecutive values predicting the
    next value. Losses reported are NRMSE (see ``nrmse``), not plain MSE.

    Args:
        model: FFNN instance; its ``input_size`` sets the context length.
        train_data: 1-D sequence of training values.
        batch_size: Number of (context, target) pairs per optimizer step.
        num_epochs: Number of training epochs.
        criterion: Loss function instance, e.g. ``nn.MSELoss()``.
        optimizer: Torch optimizer built over ``model.parameters()``.
        valid_data: Optional 1-D validation sequence.
        verbose: If truthy, print per-epoch statistics.
        eval_gen_loss: If True, also measure free-running generation NRMSE and
            keep the model with the lowest validation generation NRMSE.
        n_generate_timesteps: Generation horizon used when ``eval_gen_loss``.

    Returns:
        ``(model, stats)`` where ``stats`` is a ``(num_epochs, 4)`` array (see
        column description below). When ``valid_data`` and ``eval_gen_loss``
        are both given, the returned model is the best-by-validation copy.
    """
    input_size = model.input_size
    #assert (len(train_data) - input_size) % batch_size == 0, \
    #    "there is leftover training data that doesn't fit neatly into a batch"
    n_iter = int((len(train_data) - input_size) / batch_size)
    # rows: epoch number. columns: (sup. train nrmse, sup. valid nrmse, gen. train nrmse,
    # gen. valid nrmse). If valid_data not provided, last 3 columns are zeros.
    # Else if eval_gen_loss=False, last two columns zeros.
    stats = np.zeros((num_epochs, 4))
    if eval_gen_loss:
        # 'early stopping': return the model that gives lowest validation generation NRMSE
        best_model = (None, np.inf, None)
    for epoch in range(num_epochs):
        train_loss = 0.
        # NOTE(review): stepping by batch_size over only n_iter start indices
        # visits a small prefix of the training data; range(n_iter) (stride 1)
        # or range(0, len(train_data) - input_size, batch_size) looks like the
        # intent -- confirm before changing.
        for i in range(0, n_iter, batch_size):
            inputs = torch.FloatTensor(batch_size, input_size)
            targets = torch.FloatTensor(batch_size)
            # Build one batch of sliding windows starting at i.
            for batch_idx, j in enumerate(range(i, i+batch_size)):
                # inputs[batch_idx] = torch.FloatTensor(train_data[j:(j+input_size)])
                inputs[batch_idx] = torch.FloatTensor(train_data[j:(j+input_size)])
                targets[batch_idx] = train_data[j+input_size]
            inputs = Variable(inputs)
            targets = Variable(targets)
            # fprop, bprop, optimize
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            # normalized root mean square error (accumulated as squared NRMSE)
            train_loss += nrmse(outputs, targets)**2
        train_loss = np.sqrt(train_loss)
        stats[epoch, 0] = train_loss
        if verbose:
            print('='*50)
            print('Epoch [%d/%d]' % (epoch+1, num_epochs))
            print('Total sup. training NRMSE: %.7f' % train_loss)
        # Calculate GENERATION training loss ======================================
        if eval_gen_loss:
            gen_outs, nrms_error = test(model, train_data[:(n_generate_timesteps+input_size)],
                                        plot=False)
            print('Generation training NRMSE (for %d time steps): %.7f' % \
                (n_generate_timesteps, nrms_error))
            stats[epoch, 2] = nrms_error
        if valid_data is not None:
            # Calculate SUPERVISED validation loss ================================
            outputs = []
            da_targets = []
            for i in range(len(valid_data) - input_size):
                inputs = valid_data[i:(i+input_size)]
                inputs = Variable(torch.FloatTensor(inputs))
                target = Variable(torch.FloatTensor([valid_data[i+input_size]]))
                da_targets.append(target)
                output = model(inputs)
                mse = criterion(output, target).data[0]  # NOTE(review): computed but unused
                outputs += [output]
            valid_loss = float(nrmse(outputs, da_targets))
            stats[epoch, 1] = valid_loss
            if verbose:
                print('Total sup. validation NRMSE: %.7f' % valid_loss)
            if eval_gen_loss:
                # Now calculate GENERATION validation loss ===========================
                gen_outs, nrms_error = test(model, valid_data[:(n_generate_timesteps+input_size)],
                                            plot=False)
                print('Generation validation NRMSE (for %d time steps): %.7f' % \
                    (n_generate_timesteps, nrms_error))
                stats[epoch, 3] = nrms_error
                # Track the best-so-far model copy for 'early stopping'.
                if nrms_error <= best_model[1]:
                    best_model = (deepcopy(model), nrms_error, epoch)
    if valid_data is not None and eval_gen_loss:
        print('BEST EPOCH: %d' % (best_model[2]+1))
        return best_model[0], stats
    else:
        return model, stats
def to_numpy(arr):
    """Coerce ``arr`` into a squeezed numpy array.

    Accepts a plain sequence, a ``torch.Tensor``, an autograd ``Variable``,
    or a list of either. The checks below are sequential on purpose: a list
    of Variables is first turned into a list of ndarrays, and a lone
    Variable is unwrapped to a Tensor before the Tensor conversion runs.
    """
    result = arr
    if isinstance(result, list) and isinstance(result[0], Variable):
        result = [element.data.numpy() for element in result]
    if isinstance(result, list) and isinstance(result[0], torch.Tensor):
        result = [element.numpy() for element in result]
    if isinstance(result, Variable):
        result = result.data
    if isinstance(result, torch.Tensor):
        result = result.numpy()
    return np.array(result).squeeze()
def nrmse(outputs, targets):
    """Normalized root mean square error between two 1-D array-likes.

    Both arguments may be anything ``to_numpy`` accepts (lists, Tensors,
    Variables, ndarrays).

    info: www.doc.ic.ac.uk/teaching/distinguished-projects/2013/j.forman-gornall.pdf
    (^ ERROR there: pi in the denominator should be oi)

    NOTE: the denominator uses the module-level global ``__DATA_MEAN__``
    (mean of all training+test data, set in ``__main__``), which must be
    defined before this function is called.
    """
    outputs = to_numpy(outputs)
    targets = to_numpy(targets)
    assert len(outputs.shape) == 1
    assert len(targets.shape) == 1
    # normalizer: square error if we just predicted the true mean
    numer = np.sum((outputs - targets)**2)
    # denom = np.sum((targets - np.mean(targets))**2) # <- 'true' mean: mean of argument 'targets'
    denom = np.sum((targets - __DATA_MEAN__)**2) # <- 'true' mean: mean of all training+test data
    # normalizer: variance of data
    #denom = __DATA_VAR__
    #numer = np.mean((outputs - targets)**2)
    return np.sqrt(numer / denom)
def calculate_rmse(outputs, targets):
    """Plain (unnormalized) root-mean-square error between two array-likes."""
    difference = to_numpy(outputs) - to_numpy(targets)
    return np.sqrt(np.mean(difference ** 2))
def test(model, data, sample_step=None, plot=True, show_error=True, save_fig=False, title=None):
    """
    Pass the trained model.

    Free-runs the model over ``data``: it is primed with the first
    ``model.input_size`` true values, then each prediction is fed back in as
    input. If ``sample_step`` is given, every ``sample_step``-th step feeds
    the true value back in instead (teacher forcing markers on the plot).

    Returns (generated_outputs, generation_nrmse).
    """
    input_size = model.input_size
    inputs = data[:input_size] # type(inputs) = list
    output = model(Variable(torch.FloatTensor(inputs))).data[0]
    generated_data = [output]
    for i in range(input_size, len(data)-1):
        # every 'sample_step' iterations, feed the true value back in instead of generated value
        if sample_step is not None and (i % sample_step) == 0:
            inputs.extend([data[i]])
            inputs = inputs[1:]
        else:
            inputs.extend([output]) # shift input
            inputs = inputs[1:] # data
        output = model(Variable(torch.FloatTensor(inputs))).data[0]
        generated_data.append(output)
    # NRMSE against the part of `data` the model was asked to reproduce.
    error = nrmse(generated_data, data[input_size:])
    rmse = calculate_rmse(generated_data, data[input_size:])  # NOTE(review): computed but unused
    # print('MSE: %.7f' % error)
    if plot:
        xs = range(len(generated_data))
        f, ax = plt.subplots(figsize=(16, 10))
        if title is not None:
            ax.set_title(title+('; error=%.5f' % error))
        ax.plot(xs, data[input_size:], label='True data')
        ax.plot(xs, generated_data, label='Generated data')
        if sample_step is not None:
            # Mark the positions where the true value was injected.
            smp_xs = np.arange(0, len(xs), sample_step)
            smp_ys = [data[x+input_size] for x in smp_xs]
            ax.scatter(smp_xs, smp_ys, label='sampling markers')
        if show_error:
            err_plt = np.array(generated_data) - np.array(data[input_size:])
            ax.plot(xs, err_plt, label='error')
            ax.plot(xs, [0]*len(xs), linestyle='--')
        plt.legend()
        if save_fig:
            assert title is not None, "Provide a title/filename to save results."
            f.savefig(title)
        plt.show()
    return generated_data, error
if __name__ == "__main__":
# Experiment settings / parameters ========================================================
t = str(time.time()).replace('.', 'p')
eval_valid = True # whether or not to evaluate MSE loss on test set during training
eval_gener = True # whether or not to generate future values, calculate that MSE loss
eval_gen_loss = True
save_fig = False
save_results = False
reg = 1e-3 # lambda for L2 regularization
n_generate_timesteps = 2000
learn_rate = 0.009
n_epochs = 100
# ========================================================================================
# Get data ===============================================================================
from MackeyGlass.MackeyGlassGenerator import run
data = run(num_data_samples=21000)
data_var = np.var(np.array(data))
__DATA_VAR__ = np.var(np.array(data))
__DATA_MEAN__ = np.mean(np.array(data))
print('data mean, variance: %.5f, %.5f' % (__DATA_MEAN__, __DATA_VAR__))
train_data = data[:14000]
if eval_valid:
valid_data = data[14000:20000]
else:
valid_data = None
test_data = data[20000:]
# Set up model, loss function, optimizer =================================================
model = FFNN(input_size=50, hidden_size=100, n_hidden_layers=2, activation=nn.Sigmoid)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learn_rate)
title = "%s__ninputs%d__layers%d__nHU%d__lambda%.5f__lr%f" \
% (t, model.input_size, model.n_hidden_layers, model.hidden_size, reg, learn_rate)
title = title.replace('.', 'p') # replace period w/ 'p' so can be used as filename
# Train model ============================================================================
model, stats = train(model, train_data, 20, n_epochs, criterion, optimizer,
valid_data=valid_data, verbose=1, eval_gen_loss=eval_gen_loss,
n_generate_timesteps=n_generate_timesteps)
# losses are NORMALIZED ROOT MEAN SQUARE ERROR (not regular MSE)
train_losses = stats[:, 0]
if 1:
if eval_valid:
valid_losses = stats[:, 1]
f, (ax1, ax2) = plt.subplots(2, 1)
xs = range(len(train_losses))
ax1.plot(xs, train_losses)
ax1.set_title('Supervised training NRMSE per epoch')
ax1.set_xlabel('Epoch')
ax1.set_ylabel('Loss')
xs = range(len(valid_losses))
ax2.plot(xs, valid_losses)
ax2.set_title('Supervised validation NRMSE per epoch')
ax2.set_xlabel('Epoch')
ax2.set_ylabel('Loss')
if save_fig:
f.savefig('Results/FFNN/FIG__%s__tr-val-loss.pdf' % title)
plt.show()
else:
f, ax = plt.subplots()
xs = range(len(train_losses))
ax.plot(xs, train_losses)
ax.set_title('Training loss per epoch')
ax.set_xlabel('Epoch')
ax.set_ylabel('Loss')
if save_fig:
f.savefig('Results/FFNN/FIG__%s__tr-loss.pdf' % title)
plt.legend(); plt.show()
# IMPORTANT BITS ======================================================================
if eval_gener:
input_size = model.input_size
if valid_data is None:
valid_data = data[14000:]
g_title = 'Results/FFNN/FIG__%s__gen-loss.pdf' % title
generated_outputs, gen_mse = test(
model, valid_data[:(n_generate_timesteps+input_size)], plot=False
)
print('\n'*3)
print('='*30)
gen_mse_normed = gen_mse
print('Final model\'s validation NRMSE for %d generated values: %.7f' % \
(n_generate_timesteps, gen_mse_normed))
generated_outputs_train, gen_err_train = test(
model, train_data[:n_generate_timesteps], plot=False
)
print('gen_err_train: %.7f' % gen_err_train)
import pickle as pkl
to_save = dict()
to_save['stats'] = stats
to_save['model'] = model
to_save['gen_outputs'] = generated_outputs
to_save['gen_normed_mse'] = gen_mse_normed
to_save['n_generated_timesteps'] = n_generate_timesteps
to_save['adam_learn_rate'] = learn_rate
if save_results:
fname = 'Results/FFNN/PKL__%s.p' % title
pkl.dump(to_save, open(fname, 'wb'))
# PLOTTING GENERATION STUFF ========================================================
plot_title = ''
show_error = True
input_size = model.input_size
xs = range(len(generated_outputs))
f, ax = plt.subplots(figsize=(16, 10))
ax.set_title(plot_title)
ax.plot(xs, valid_data[input_size:(n_generate_timesteps+input_size)], label='True data')
ax.plot(xs, generated_outputs, label='Generated data')
if show_error:
errors = np.array(generated_outputs) - \
| |
"""Module containing classes for datagrid MVC implementation."""
import base64
import contextlib
import datetime
import itertools
import logging
import os
from gi.repository import (
GLib,
GObject,
Gdk,
GdkPixbuf,
Gtk,
Pango,
)
from pygtkcompat.generictreemodel import GenericTreeModel
from datagrid_gtk3.ui.popupcal import DateEntry
from datagrid_gtk3.ui.uifile import UIFile
from datagrid_gtk3.utils.dateutils import normalize_timestamp
from datagrid_gtk3.utils.imageutils import ImageCacheManager
from datagrid_gtk3.utils.transformations import get_transformer
# Directory holding bundled media files, resolved relative to this module.
_MEDIA_FILES = os.path.join(
    os.path.dirname(os.path.realpath(__file__)),
    os.pardir,
    "data",
    "media"
)

logger = logging.getLogger(__name__)

# Decode a tiny placeholder PNG once at import time (see NO_IMAGE_PIXBUF).
_no_image_loader = GdkPixbuf.PixbufLoader.new_with_type("png")
_no_image_loader.write(base64.b64decode("""
iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAABmJLR0QA/wD/AP+gvaeTAAAACXBI
WXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH3wEPEDYaIuf2wwAAABl0RVh0Q29tbWVudABDcmVhdGVk
IHdpdGggR0lNUFeBDhcAAAANSURBVAjXY2BgYGAAAAAFAAFe8yo6AAAAAElFTkSuQmCC
"""))
_no_image_loader.close()

# A trivial 1px transparent png to be used on CellRendererPixbuf when there's
# no data there. Due to possible bug on gtk, passing None to it will make it
# repeat the latest value read in a row for that column
NO_IMAGE_PIXBUF = _no_image_loader.get_pixbuf()

# Used to represent "no option selected" on filters. We use this instead of
# None as it can be a valid value for filtering.
NO_FILTER_OPTION = object()
class OptionsPopup(Gtk.Window):
    """Popup to select which columns should be displayed on datagrid.

    :param toggle_btn: the toggle button responsible for popping this up
    :type toggle_btn: :class:`Gtk.ToggleButton`
    :param controller: the datagrid controller
    :type controller: :class:`DataGridController`
    """

    # Vertical padding (px) between packed option widgets.
    OPTIONS_PADDING = 5
    # Maximum popup height (px); taller content scrolls.
    MAX_HEIGHT = 500

    (VIEW_TREE,
     VIEW_FLAT,
     VIEW_ICON) = range(3)

    __gsignals__ = {
        'column-visibility-changed': (GObject.SignalFlags.RUN_FIRST,
                                      None, (str, bool)),
        'view-changed': (GObject.SignalFlags.RUN_FIRST, None, (int, ))
    }

    def __init__(self, toggle_btn, controller, *args, **kwargs):
        self._toggle_btn = toggle_btn
        # Keep the handler id so popdown() can untoggle the button without
        # retriggering this popup.
        self._toggled_id = self._toggle_btn.connect(
            'toggled', self.on_toggle_button_toggled)
        self._controller = controller
        super(OptionsPopup, self).__init__(
            Gtk.WindowType.POPUP, *args, **kwargs)
        self.connect('button-press-event', self.on_button_press_event)
        self.connect('key-press-event', self.on_key_press_event)
        self._scrolled_window = Gtk.ScrolledWindow(
            vscrollbar_policy=Gtk.PolicyType.AUTOMATIC,
            hscrollbar_policy=Gtk.PolicyType.NEVER)
        alignment = Gtk.Alignment()
        alignment.set_padding(5, 5, 5, 5)
        alignment.add(self._scrolled_window)
        self.add(alignment)

    ##
    # Public
    ##

    def popup(self):
        """Show the popup.

        This will show the popup and allow the user to change
        the columns visibility.
        """
        if not self._toggle_btn.get_realized():
            return

        # Rebuild the option widgets from scratch each time we pop up.
        child = self._scrolled_window.get_child()
        if child:
            self._scrolled_window.remove(child)
        vbox = Gtk.VBox()
        combo = self._get_view_options()
        if combo is not None:
            vbox.pack_start(combo, expand=False, fill=False,
                            padding=self.OPTIONS_PADDING)
        # Column visibility switches only apply to tree/flat views.
        if not isinstance(self._controller.view, DataGridIconView):
            if combo is not None:
                vbox.pack_start(
                    Gtk.Separator(orientation=Gtk.Orientation.HORIZONTAL),
                    expand=True, fill=True, padding=self.OPTIONS_PADDING)
            for switch in self._get_visibility_options():
                vbox.pack_start(switch, expand=False, fill=False,
                                padding=self.OPTIONS_PADDING)
        self._scrolled_window.add(vbox)

        toplevel = self._toggle_btn.get_toplevel().get_toplevel()
        if isinstance(toplevel, (Gtk.Window, Gtk.Dialog)):
            group = toplevel.get_group()
            if group:
                group.add_window(self)

        x, y = self._get_position()
        self.move(x, y)
        self.show_all()

        # Clamp the popup height; overflow is handled by the scrolled window.
        allocation = vbox.get_allocation()
        height = min(allocation.height + 2 * self.OPTIONS_PADDING,
                     self.MAX_HEIGHT)
        self.set_size_request(-1, height)

        if not self._popup_grab_window():
            self.popdown()

    def popdown(self):
        """Hide the popup."""
        if not self._toggle_btn.get_realized():
            return

        # Make sure the toggle button is unset when popping down.
        with self._toggle_btn.handler_block(self._toggled_id):
            self._toggle_btn.set_active(False)

        self.grab_remove()
        self.hide()

    ##
    # Private
    ##

    def _popup_grab_window(self):
        """Grab pointer and keyboard on this window.

        By grabbing the pointer and the keyboard, we will be able to
        intercept key-press and button-press events.
        """
        window = self.get_window()
        # NOTE: 0L time/current-time literals below are Python 2 longs.
        grab_status = Gdk.pointer_grab(
            window, True,
            (Gdk.EventMask.BUTTON_PRESS_MASK |
             Gdk.EventMask.BUTTON_RELEASE_MASK |
             Gdk.EventMask.POINTER_MOTION_MASK),
            None, None, 0L)
        if grab_status == Gdk.GrabStatus.SUCCESS:
            if Gdk.keyboard_grab(window, True, 0L) != Gdk.GrabStatus.SUCCESS:
                # Keyboard grab failed; release the pointer grab too.
                display = window.get_display()
                display.pointer_ungrab(0L)
                return False

        self.grab_add()
        return True

    def _get_position(self):
        """Get the position to show this popup."""
        allocation = self._toggle_btn.get_allocation()
        window = self._toggle_btn.get_window()

        if self._toggle_btn.get_has_window():
            # Allocation is already relative to the button's own window.
            x_coord = 0
            y_coord = 0
        else:
            x_coord = allocation.x
            y_coord = allocation.y

        x, y = window.get_root_coords(x_coord, y_coord)
        # Place the popup just below the toggle button.
        return x, y + allocation.height

    def _get_view_options(self):
        """Build view options for datagrid."""
        iters = {}
        model = Gtk.ListStore(str, int)
        iters[self.VIEW_TREE] = model.append(("Tree View", self.VIEW_TREE))
        if self._controller.model.flat_column_idx is not None:
            iters[self.VIEW_FLAT] = model.append(("Flat View", self.VIEW_FLAT))
        if any(c['transform'] == 'image'
               for c in self._controller.model.columns):
            iters[self.VIEW_ICON] = model.append(("Icon View", self.VIEW_ICON))

        # Avoid displaying the combo if there's only one option
        if len(iters) == 1:
            return None

        combo = Gtk.ComboBox()
        combo.set_model(model)
        renderer = Gtk.CellRendererText()
        combo.pack_start(renderer, True)
        combo.add_attribute(renderer, 'text', 0)

        # Preselect the entry matching the currently attached view.
        if isinstance(self._controller.view, DataGridView):
            if self._controller.model.active_params.get('flat', False):
                combo.set_active_iter(iters[self.VIEW_FLAT])
            else:
                combo.set_active_iter(iters[self.VIEW_TREE])
        elif isinstance(self._controller.view, DataGridIconView):
            combo.set_active_iter(iters[self.VIEW_ICON])
        else:
            raise AssertionError("Unknown view type %r" % (
                self._controller.view, ))

        combo.connect('changed', self.on_combo_view_changed)
        return combo

    def _get_visibility_options(self):
        """Construct the switches based on the actual model columns."""
        model = self._controller.model
        hidden_columns = model.hidden_columns
        for column in model.columns:
            if column['name'] in hidden_columns:
                continue
            switch = Gtk.Switch()
            label = Gtk.Label(column['display'])
            switch.set_active(column['name'] in model.display_columns)
            hbox = Gtk.HBox(spacing=5)
            hbox.pack_start(switch, expand=False, fill=True, padding=0)
            hbox.pack_start(label, expand=True, fill=True, padding=0)
            switch.connect(
                'notify::active',
                self.on_column_switch_notify_active, column['name'])
            yield hbox

    ##
    # Callbacks
    ##

    def on_key_press_event(self, window, event):
        """Handle key press events.

        Popdown when the user presses Esc.
        """
        if event.get_keyval()[1] == Gdk.KEY_Escape:
            self.popdown()
            return True
        return False

    def on_button_press_event(self, window, event):
        """Handle button press events.

        Popdown when the user clicks on an area outside this window.
        """
        event_rect = Gdk.Rectangle()
        event_rect.x, event_rect.y = event.get_root_coords()
        event_rect.width = 1
        event_rect.height = 1

        allocation = self.get_allocation()
        window_rect = Gdk.Rectangle()
        window_rect.x, window_rect.y = self._get_position()
        window_rect.width = allocation.width
        window_rect.height = allocation.height

        intersection = Gdk.rectangle_intersect(
            event_rect, window_rect)
        # if the click was outside this window, hide it
        if not intersection[0]:
            self.popdown()

    def on_combo_view_changed(self, widget):
        """Handle changes on the view combo.

        Emit 'view-changed' for the given view.

        :param widget: the combobox that received the event
        :type widget: :class:`Gtk.ComboBox`
        """
        model = widget.get_model()
        value = model[widget.get_active()][1]
        self.emit('view-changed', value)
        self.popdown()

    def on_toggle_button_toggled(self, widget):
        """Show switch list of columns to display.

        :param widget: the ToggleButton that launches the list
        :type widget: :class:`Gtk.ToggleButton`
        """
        if widget.get_active():
            self.popup()
        else:
            self.popdown()

    def on_column_switch_notify_active(self, widget, p_spec, name):
        """Set the list of columns to display based on column checkboxes.

        :param widget: checkbox widget for selected/deselected column
        :type widget: :class:`Gtk.Switch`
        :param str name: name of the column to add/remove from list
        """
        self.emit('column-visibility-changed', name, widget.get_active())
class DataGridContainer(UIFile):
    """Provides UI container for tabular data TreeStore grid.

    :param window: Window for main launching application -- needed for dialog
        interaction
    :type window: :class:`Gtk.Window`
    """

    # Glade definition of the datagrid widget hierarchy, shipped in the
    # package's ``glade`` directory next to this module.
    UI_FNAME = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        'glade',
        'datagrid.glade')

    def __init__(self, window):
        """Set up container."""
        self.window = window
        UIFile.__init__(self, self.UI_FNAME)
class DataGridController(object):
"""UI controls to manipulate datagrid model/view.
:param container: ``UIFile`` instance providing ``Gtk.Box`` and
access to GTK widgets for controller
:type container: :class:`DataGridContainer`
:param data_source: Database backend instance
:type data_source: :class:`datagrid_gtk3.db.sqlite.SQLiteDataSource`
:param selected_record_callback:
Callback to call when a record is selected in the grid
:type selected_record_callback: `callable`
:param selected_record_callback:
Callback to call when an icon is activated on `DataGridIconView`
:type selected_record_callback: `callable`
:param bool has_checkboxes: Whether record rows have a checkbox
:param decode_fallback: Optional callable for converting objects to
strings in case `unicode(obj)` fails.
:type decode_fallback: callable
:param get_full_path: Callable for returning full paths to files when
given a relative path, or None if the file isn't available.
:type get_full_path: callable
"""
    def __init__(self, container, data_source, selected_record_callback=None,
                 activated_icon_callback=None, activated_row_callback=None,
                 has_checkboxes=True, decode_fallback=None,
                 get_full_path=None):
        """Setup UI controls and load initial data view."""
        self.extra_filter_widgets = {}
        self.container = container
        self.decode_fallback = decode_fallback if decode_fallback else repr
        self.get_full_path = get_full_path
        self.selected_record_callback = selected_record_callback
        self.activated_icon_callback = activated_icon_callback
        self.activated_row_callback = activated_row_callback

        # Watch vertical scrolling (used to load more rows on demand).
        self.vscroll = container.grid_scrolledwindow.get_vadjustment()
        self.vscroll.connect_after('value-changed', self.on_scrolled)

        # Both views are created up front; only one is attached at a time.
        self.tree_view = DataGridView(None, has_checkboxes=has_checkboxes)
        self.icon_view = DataGridIconView(None, has_checkboxes=has_checkboxes)
        self.tree_view.connect('cursor-changed',
                               self.on_treeview_cursor_changed)
        self.tree_view.connect('row-activated',
                               self.on_treeview_row_activated)
        self.icon_view.connect('selection-changed',
                               self.on_iconview_selection_changed)
        self.icon_view.connect('item-activated',
                               self.on_iconview_item_activated)
        self.tree_view.connect('row-expanded',
                               self.on_tree_view_row_expanded)
        self.tree_view.connect('row-collapsed',
                               self.on_tree_view_row_collapsed)

        # The treeview will be the default view
        self.view = self.tree_view
        self.container.grid_scrolledwindow.add(self.view)

        # Refresh thumbnails when the image cache finishes loading one.
        cm = ImageCacheManager.get_default()
        cm.connect('image-loaded', self.on_image_cache_manager_image_loaded)

        # select columns toggle button
        self.options_popup = OptionsPopup(
            self.container.togglebutton_options, self)
        self.options_popup.connect('column-visibility-changed',
                                   self.on_popup_column_visibility_changed)
        self.options_popup.connect('view-changed', self.on_popup_view_changed)

        # date range widgets
        icon_theme = Gtk.IconTheme.get_default()
        for icon in ['calendar', 'stock_calendar']:
            if icon_theme.has_icon(icon):
                break
        else:
            # Should never happen, just a precaution
            raise Exception("No suitable calendar icon found on theme")
        for image in [self.container.image_start_date,
                      self.container.image_end_date]:
            image.set_from_icon_name(icon, Gtk.IconSize.BUTTON)
        self.date_start = DateEntry(self.container.window,
                                    DateEntry.TYPE_START)
        self.date_start.set_editable(False)
        self.date_start.set_sensitive(False)
        self.date_start.connect('date_changed', self.on_date_change, 'start')
        # FIXME: ^^ use hyphen in signal name
        self.container.vbox_start_date.pack_start(
            self.date_start, expand=False, fill=True, padding=0)
        self.date_end = DateEntry(self.container.window,
                                  DateEntry.TYPE_END)
        self.date_end.set_editable(False)
        self.date_end.set_sensitive(False)
        self.date_end.connect('date_changed', self.on_date_change, 'end')
        self.container.vbox_end_date.pack_start(
            self.date_end, expand=False, fill=True, padding=0)

        # search widget
        self.container.entry_search.connect('activate', self.on_search_clicked)
        self.container.entry_search.connect(
            'search-changed', self.on_search_clicked)

        self.container.grid_vbox.show_all()

        self.bind_datasource(data_source)
###
# Public
###
def bind_datasource(self, data_source):
"""Binds a data source to the datagrid.
:param data_source: The data source to bind.
:type data_source: :class:`datagrid_gtk3.db.DataSource`
"""
self.model = DataGridModel(data_source,
self.get_full_path,
self.decode_fallback)
self.model.connect('data-loaded', self.on_data_loaded)
for view in [self.tree_view, self.icon_view]:
view.model = self.model
liststore_date_cols = Gtk.ListStore(str, str, str)
if self.model.datetime_columns:
self.date_start.set_sensitive(True)
self.date_end.set_sensitive(True)
for column in self.model.datetime_columns:
liststore_date_cols.append(
(column['name'], column['display'], column['transform']))
combox_date_cols = self.container.combobox_date_columns
old_model = combox_date_cols.get_model()
if old_model:
del old_model
combox_date_cols.set_model(liststore_date_cols)
if not combox_date_cols.get_cells():
cell = Gtk.CellRendererText()
combox_date_cols.pack_start(cell, True)
combox_date_cols.add_attribute(cell, 'text', 1)
combox_date_cols.set_active(0)
combox_date_cols.connect('changed', self.on_date_change, None)
# Hide date column selection if there can be no choice
if len(liststore_date_cols) < 2:
combox_date_cols.hide()
self.container.date_column_label.hide()
else:
# They might have been hidden on a previous bind call.
combox_date_cols.show()
self.container.date_column_label.show()
# If the are no date columns, hide the date range controls as well
widgets = (
self.container.image_start_date,
self.container.vbox_start_date,
self.container.label_date_to,
self.container.image_end_date,
self.container.vbox_end_date,
self.container.filters_separator,
)
if len(liststore_date_cols) | |
# -*- coding: utf-8 -*-
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module containing the report stages."""
from __future__ import print_function
import datetime
import os
import sys
from infra_libs import ts_mon
from chromite.cbuildbot import cbuildbot_run
from chromite.cbuildbot import commands
from chromite.cbuildbot import goma_util
from chromite.cbuildbot import validation_pool
from chromite.cbuildbot.stages import completion_stages
from chromite.cbuildbot.stages import generic_stages
from chromite.lib.const import waterfall
from chromite.lib import cidb
from chromite.lib import config_lib
from chromite.lib import constants
from chromite.lib import clactions
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import failures_lib
from chromite.lib import git
from chromite.lib import gs
from chromite.lib import metadata_lib
from chromite.lib import metrics
from chromite.lib import osutils
from chromite.lib import patch as cros_patch
from chromite.lib import portage_util
from chromite.lib import results_lib
from chromite.lib import retry_stats
from chromite.lib import risk_report
from chromite.lib import toolchain
from chromite.lib import tree_status
from chromite.lib import triage_lib
# Site configuration, loaded once at import time and shared module-wide.
site_config = config_lib.GetConfig()
def WriteBasicMetadata(builder_run):
  """Writes basic metadata that should be known at start of execution.

  This method writes to |build_run|'s metadata instance the basic metadata
  values that should be known at the beginning of the first cbuildbot
  execution, prior to any reexecutions.

  In particular, this method does not write any metadata values that depend
  on the builder config, as the config may be modified by patches that are
  applied before the final reexectuion. (exception: the config's name itself)

  This method is safe to run more than once (for instance, once per cbuildbot
  execution) because it will write the same data each time.

  Args:
    builder_run: The BuilderRun instance for this build.
  """
  start_time_stamp = cros_build_lib.UserDateTimeFormat(
      timeval=results_lib.Results.start_time)

  basic_info = {
      # Data for this build.
      'bot-hostname': cros_build_lib.GetHostName(fully_qualified=True),
      'build-number': builder_run.buildnumber,
      'builder-name': builder_run.GetBuilderName(),
      # This is something like https://uberchromegw.corp.google.com/i/chromeos/
      # Note that we are phasing out using the buildbot UI, transitioning
      # instead to luci-milo.
      # Once we phase out completely, we can get rid of this metadata entry.
      'buildbot-url': os.environ.get('BUILDBOT_BUILDBOTURL', ''),
      'buildbot-master-name': os.environ.get('BUILDBOT_MASTERNAME', ''),
      'bot-config': builder_run.config['name'],
      'time': {'start': start_time_stamp},
      'master_build_id': builder_run.options.master_build_id,
      'suite_scheduling': builder_run.config['suite_scheduling'],
  }
  builder_run.attrs.metadata.UpdateWithDict(basic_info)
def WriteTagMetadata(builder_run):
  """Add a 'tags' sub-dict to metadata.

  This is a proof of concept for using tags to help find commonality
  in failures.
  """
  build_id, _ = builder_run.GetCIDBHandle()
  hostname = cros_build_lib.GetHostName(fully_qualified=True)

  # Yes, these values match general metadata values, but they are just
  # proof of concept, so far.
  tags = {
      'bot_config': builder_run.config['name'],
      'bot_hostname': hostname,
      'build_id': build_id,
      'build_number': builder_run.buildnumber,
      'builder_name': builder_run.GetBuilderName(),
      'buildbot_url': os.environ.get('BUILDBOT_BUILDBOTURL', ''),
      'buildbot_master_name': os.environ.get('BUILDBOT_MASTERNAME', ''),
      'id': ('Build', build_id),
      'master_build_id': builder_run.options.master_build_id,
      'important': builder_run.config['important'],
  }

  # Guess type of bot from its hostname.
  if '.golo.' in hostname:
    tags['bot_type'] = 'golo'
  else:
    gce_type = next((t for t in ('beefy', 'standard', 'wimpy')
                     if 'cros-%s' % t in hostname), None)
    tags['bot_type'] = 'gce-%s' % gce_type if gce_type else 'unknown'

  # Look up the git version; on failure just omit the tag.
  try:
    cmd_result = cros_build_lib.RunCommand(['git', '--version'],
                                           capture_output=True)
    tags['git_version'] = cmd_result.output.strip()
  except cros_build_lib.RunCommandError:
    pass

  # Look up the repo version; on failure just omit the tag.
  # Convert the following output into 'v1.12.17-cr3':
  #
  #   repo version v1.12.17-cr3
  #          (from https://chromium.googlesource.com/external/repo.git)
  #   repo launcher version 1.21
  #          (from /usr/local/google/home/dgarrett/sand/depot_tools/repo)
  #   git version 2.8.0.rc3.226.g39d4020
  #   Python 2.7.6 (default, Jun 22 2015, 17:58:13)
  #   [GCC 4.8.2]
  try:
    cmd_result = cros_build_lib.RunCommand(['repo', '--version'],
                                           capture_output=True)
    tags['repo_version'] = cmd_result.output.splitlines()[0].split(' ')[-1]
  except (cros_build_lib.RunCommandError, IndexError):
    pass

  builder_run.attrs.metadata.UpdateKeyDictWithDict(constants.METADATA_TAGS,
                                                   tags)
def GetChildConfigListMetadata(child_configs, config_status_map):
  """Creates a list for the child configs metadata.

  This creates a list of child config dictionaries from the given child
  configs, optionally adding the final status if the success map is
  specified.

  Args:
    child_configs: The list of child configs for this build.
    config_status_map: The map of config name to final build status.

  Returns:
    List of child config dictionaries, with optional final status
  """
  def _FinalStatus(config_name):
    """Map a config name to PASSED/FAILED, or None without a status map."""
    if not config_status_map:
      return None
    if config_status_map[config_name]:
      return constants.BUILDER_STATUS_PASSED
    return constants.BUILDER_STATUS_FAILED

  return [{'name': child['name'],
           'boards': child['boards'],
           'status': _FinalStatus(child['name'])}
          for child in child_configs]
def _UploadAndLinkGomaLogIfNecessary(
stage_name, goma_dir, goma_client_json, goma_tmp_dir):
"""Uploads the logs for goma, if needed. Also create a link to the visualizer.
If |goma_tmp_dir| is given, |goma_dir| and |goma_client_json| must not be
None.
Args:
stage_name: Name of the stage where goma is used.
goma_dir: Path to goma installed directory.
goma_client_json: Path to the service account json file.
goma_tmp_dir: Goma's working directory.
"""
if not goma_tmp_dir:
return
goma = goma_util.Goma(goma_dir, goma_client_json, goma_tmp_dir=goma_tmp_dir)
# Just in case, stop the goma. E.g. In case of timeout, we do not want to
# keep goma compiler_proxy running.
goma.Stop()
goma_urls = goma.UploadLogs()
if goma_urls:
for label, url in goma_urls:
logging.PrintBuildbotLink('%s %s' % (stage_name, label), url)
class BuildStartStage(generic_stages.BuilderStage):
"""The first stage to run.
This stage writes a few basic metadata values that are known at the start of
build, and inserts the build into the database, if appropriate.
"""
  def _GetBuildTimeoutSeconds(self):
    """Get the overall build timeout to be published to cidb.

    Returns:
      Timeout in seconds. None if no sensible timeout can be inferred.
    """
    timeout_seconds = self._run.options.timeout
    if self._run.config.master:
      # Masters fall back to the config-level build timeout, clamped by the
      # command-line timeout when one was supplied (> 0).
      master_timeout = self._run.config.build_timeout
      if timeout_seconds > 0:
        master_timeout = min(master_timeout, timeout_seconds)
      return master_timeout
    # Non-masters: only a positive command-line timeout is meaningful.
    return timeout_seconds if timeout_seconds > 0 else None
@failures_lib.SetFailureType(failures_lib.InfrastructureFailure)
def PerformStage(self):
if self._run.config['doc']:
logging.PrintBuildbotLink('Builder documentation',
self._run.config['doc'])
WriteBasicMetadata(self._run)
# This is a heuristic value for |important|, since patches that get applied
# later in the build might change the config. We write it now anyway,
# because in case the build fails before Sync, it is better to have this
# heuristic value than None. In BuildReexecutionFinishedStage, we re-write
# the definitive value.
self._run.attrs.metadata.UpdateWithDict(
{'important': self._run.config['important']})
d = self._run.attrs.metadata.GetDict()
# BuildStartStage should only run once per build. But just in case it
# is somehow running a second time, we do not want to insert an additional
# database entry. Detect if a database entry has been inserted already
# and if so quit the stage.
if 'build_id' in d:
logging.info('Already have build_id %s, not inserting an entry.',
d['build_id'])
return
# Note: In other build stages we use self._run.GetCIDBHandle to fetch
# a cidb handle. However, since we don't yet have a build_id, we can't
# do that here.
if cidb.CIDBConnectionFactory.IsCIDBSetup():
db_type = cidb.CIDBConnectionFactory.GetCIDBConnectionType()
db = cidb.CIDBConnectionFactory.GetCIDBConnectionForBuilder()
if db:
wfall = d['buildbot-master-name']
try:
build_id = db.InsertBuild(
builder_name=d['builder-name'],
waterfall=wfall,
build_number=d['build-number'],
build_config=d['bot-config'],
bot_hostname=d['bot-hostname'],
master_build_id=d['master_build_id'],
timeout_seconds=self._GetBuildTimeoutSeconds(),
important=d['important'],
buildbucket_id=self._run.options.buildbucket_id)
except Exception as e:
logging.error('Error: %s\n If the buildbucket_id to insert is '
'duplicated to the buildbucket_id of an old build and '
'the old build was canceled because of a waterfall '
'master restart, please ignore this error. Else, '
'the error needs more investigation. More context: '
'crbug.com/679974 and crbug.com/685889', e)
raise e
self._run.attrs.metadata.UpdateWithDict({'build_id': build_id,
'db_type': db_type})
logging.info('Inserted build_id %s into cidb database type %s.',
build_id, db_type)
logging.PrintBuildbotStepText('database: %s, build_id: %s' %
(db_type, build_id))
master_build_id = d['master_build_id']
if master_build_id is not None:
master_build_status = db.GetBuildStatus(master_build_id)
if master_build_status['buildbucket_id']:
master_url = tree_status.ConstructLegolandBuildURL(
master_build_status['buildbucket_id'])
else:
master_url = tree_status.ConstructDashboardURL(
master_build_status['waterfall'],
master_build_status['builder_name'],
master_build_status['build_number'])
logging.PrintBuildbotLink('Link to master build', master_url)
# Write the tag metadata last so that a build_id is available.
WriteTagMetadata(self._run)
def HandleSkip(self):
"""Ensure that re-executions use the same db instance as initial db."""
metadata_dict = self._run.attrs.metadata.GetDict()
if 'build_id' in metadata_dict:
db_type = cidb.CIDBConnectionFactory.GetCIDBConnectionType()
if not 'db_type' in metadata_dict:
# This will only execute while this CL is in the commit queue. After
# this CL lands, this block can be removed.
self._run.attrs.metadata.UpdateWithDict({'db_type': db_type})
return
if db_type != metadata_dict['db_type']:
cidb.CIDBConnectionFactory.InvalidateCIDBSetup()
raise AssertionError('Invalid attempt to switch from database %s to '
'%s.' % (metadata_dict['db_type'], db_type))
class SlaveFailureSummaryStage(generic_stages.BuilderStage):
"""Stage which summarizes and links to the failures of slave builds."""
@failures_lib.SetFailureType(failures_lib.InfrastructureFailure)
def PerformStage(self):
if not self._run.config.master:
logging.info('This stage is only meaningful for master builds. '
'Doing nothing.')
return
build_id, db = self._run.GetCIDBHandle()
if not db:
logging.info('No cidb connection for this build. '
'Doing nothing.')
return
slave_buildbucket_ids = self.GetScheduledSlaveBuildbucketIds()
slave_failures = db.GetSlaveFailures(
build_id, buildbucket_ids=slave_buildbucket_ids)
failures_by_build = cros_build_lib.GroupNamedtuplesByKey(
slave_failures, 'build_id')
for build_id, build_failures in sorted(failures_by_build.items()):
failures_by_stage = cros_build_lib.GroupNamedtuplesByKey(
build_failures, 'build_stage_id')
# Surface a link to each slave stage that failed, in stage_id sorted
# order.
for stage_id in sorted(failures_by_stage):
failure = failures_by_stage[stage_id][0]
# Ignore | |
= _dict.get('offer_id')
else:
raise ValueError('Required property \'offer_id\' not present in Offer JSON')
if 'credits_total' in _dict:
args['credits_total'] = _dict.get('credits_total')
else:
raise ValueError('Required property \'credits_total\' not present in Offer JSON')
if 'offer_template' in _dict:
args['offer_template'] = _dict.get('offer_template')
else:
raise ValueError('Required property \'offer_template\' not present in Offer JSON')
if 'valid_from' in _dict:
args['valid_from'] = string_to_datetime(_dict.get('valid_from'))
else:
raise ValueError('Required property \'valid_from\' not present in Offer JSON')
if 'expires_on' in _dict:
args['expires_on'] = string_to_datetime(_dict.get('expires_on'))
else:
raise ValueError('Required property \'expires_on\' not present in Offer JSON')
if 'credits' in _dict:
args['credits'] = OfferCredits.from_dict(_dict.get('credits'))
else:
raise ValueError('Required property \'credits\' not present in Offer JSON')
return cls(**args)
    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a Offer object from a json dictionary.

        Internal alias that simply delegates to from_dict().
        """
        return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'offer_id') and self.offer_id is not None:
_dict['offer_id'] = self.offer_id
if hasattr(self, 'credits_total') and self.credits_total is not None:
_dict['credits_total'] = self.credits_total
if hasattr(self, 'offer_template') and self.offer_template is not None:
_dict['offer_template'] = self.offer_template
if hasattr(self, 'valid_from') and self.valid_from is not None:
_dict['valid_from'] = datetime_to_string(self.valid_from)
if hasattr(self, 'expires_on') and self.expires_on is not None:
_dict['expires_on'] = datetime_to_string(self.expires_on)
if hasattr(self, 'credits') and self.credits is not None:
_dict['credits'] = self.credits.to_dict()
return _dict
    def _to_dict(self):
        """Return a json dictionary representing this model.

        Internal alias that simply delegates to to_dict().
        """
        return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Offer object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'Offer') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Offer') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class OfferCredits():
    """
    Credit information related to an offer.

    :attr float starting_balance: The available credits in the offer at the
          beginning of the month.
    :attr float used: The credits used in this month.
    :attr float balance: The remaining credits in the offer.
    """

    def __init__(self,
                 starting_balance: float,
                 used: float,
                 balance: float) -> None:
        """
        Initialize a OfferCredits object.

        :param float starting_balance: The available credits in the offer at
               the beginning of the month.
        :param float used: The credits used in this month.
        :param float balance: The remaining credits in the offer.
        """
        self.starting_balance = starting_balance
        self.used = used
        self.balance = balance

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'OfferCredits':
        """Initialize a OfferCredits object from a json dictionary."""
        # All three properties are required; report the first missing one
        # in declaration order.
        for required in ('starting_balance', 'used', 'balance'):
            if required not in _dict:
                raise ValueError(
                    'Required property \'{}\' not present in OfferCredits JSON'.format(required))
        return cls(starting_balance=_dict.get('starting_balance'),
                   used=_dict.get('used'),
                   balance=_dict.get('balance'))

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a OfferCredits object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        result = {}
        for attr in ('starting_balance', 'used', 'balance'):
            value = getattr(self, attr, None)
            if value is not None:
                result[attr] = value
        return result

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this OfferCredits object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'OfferCredits') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'OfferCredits') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not (self == other)
class OrgUsage():
    """
    The aggregated usage and charges for all the plans in the org.

    :attr str account_id: The ID of the account.
    :attr str organization_id: The ID of the organization.
    :attr str organization_name: (optional) The name of the organization.
    :attr str pricing_country: The target country pricing that should be used.
    :attr str currency_code: The currency for the cost fields in the resources,
          plans and metrics.
    :attr str month: The month.
    :attr List[Resource] resources: All the resource used in the account.
    """

    def __init__(self,
                 account_id: str,
                 organization_id: str,
                 pricing_country: str,
                 currency_code: str,
                 month: str,
                 resources: List['Resource'],
                 *,
                 organization_name: str = None) -> None:
        """
        Initialize a OrgUsage object.

        :param str account_id: The ID of the account.
        :param str organization_id: The ID of the organization.
        :param str pricing_country: The target country pricing that should be used.
        :param str currency_code: The currency for the cost fields in the
               resources, plans and metrics.
        :param str month: The month.
        :param List[Resource] resources: All the resource used in the account.
        :param str organization_name: (optional) The name of the organization.
        """
        self.account_id = account_id
        self.organization_id = organization_id
        self.organization_name = organization_name
        self.pricing_country = pricing_country
        self.currency_code = currency_code
        self.month = month
        self.resources = resources

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'OrgUsage':
        """Initialize a OrgUsage object from a json dictionary."""
        args = {}
        # Required scalar properties, checked in declaration order so the
        # error always names the first missing one.
        for key in ('account_id', 'organization_id', 'pricing_country',
                    'currency_code', 'month'):
            if key not in _dict:
                raise ValueError(
                    'Required property \'{}\' not present in OrgUsage JSON'.format(key))
            args[key] = _dict.get(key)
        if 'organization_name' in _dict:
            args['organization_name'] = _dict.get('organization_name')
        if 'resources' not in _dict:
            raise ValueError('Required property \'resources\' not present in OrgUsage JSON')
        args['resources'] = [Resource.from_dict(x) for x in _dict.get('resources')]
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a OrgUsage object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        result = {}
        # Emit keys in declaration order so json serialization is stable.
        for attr in ('account_id', 'organization_id', 'organization_name',
                     'pricing_country', 'currency_code', 'month'):
            value = getattr(self, attr, None)
            if value is not None:
                result[attr] = value
        if getattr(self, 'resources', None) is not None:
            result['resources'] = [res.to_dict() for res in self.resources]
        return result

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this OrgUsage object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'OrgUsage') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'OrgUsage') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not (self == other)
class Plan():
"""
The aggregated values for the plan.
:attr str plan_id: The ID of the plan.
:attr str plan_name: (optional) The name of the plan.
:attr str pricing_region: (optional) The pricing region for the plan.
:attr bool billable: Indicates if the plan charges are billed to the customer.
:attr float cost: The total cost incurred by the plan.
:attr float rated_cost: Total pre-discounted cost incurred by the plan.
:attr List[Metric] usage: All the metrics in the plan.
:attr List[Discount] discounts: All the discounts applicable to the plan.
"""
def __init__(self,
plan_id: str,
billable: bool,
cost: float,
rated_cost: float,
usage: List['Metric'],
discounts: List['Discount'],
*,
plan_name: str = None,
pricing_region: str = None) -> None:
"""
Initialize a Plan object.
:param str plan_id: The ID of the plan.
:param bool billable: Indicates if the plan charges are billed to the
customer.
:param float cost: The total cost incurred by the plan.
:param float rated_cost: Total pre-discounted cost incurred by the plan.
:param List[Metric] usage: All the metrics in the plan.
:param List[Discount] discounts: All the discounts applicable to the plan.
:param str plan_name: (optional) The name of the plan.
:param str pricing_region: | |
<gh_stars>1-10
"""
sim/gather.py
Functions related to gathering data from nodes after the simulation
Contributors: <EMAIL>
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import zip
from future import standard_library
standard_library.install_aliases()
import numpy as np
from ..specs import Dict, ODict
#------------------------------------------------------------------------------
# Gather data from nodes
#------------------------------------------------------------------------------
def gatherData (gatherLFP = True):
    """
    Gather cells, pops and simulation data from all MPI nodes onto the master
    node (rank 0), consolidating them into sim.allSimData, sim.net.allCells
    and sim.net.allPops, then print summary statistics.

    gatherLFP: if True, also sum the per-node LFP arrays into a single array.
    Returns sim.allSimData (only populated with gathered data on rank 0).
    """
    from .. import sim

    sim.timing('start', 'gatherTime')

    ## Pack data from all hosts
    if sim.rank==0:
        print('\nGathering data...')

    # flag to avoid saving sections data for each cell (saves gather time and space; cannot inspect cell secs or re-simulate)
    if not sim.cfg.saveCellSecs:
        for cell in sim.net.cells:
            cell.secs = None
            cell.secLists = None

    # flag to avoid saving conns data for each cell (saves gather time and space; cannot inspect cell conns or re-simulate)
    if not sim.cfg.saveCellConns:
        for cell in sim.net.cells:
            cell.conns = []
    # Store conns in a compact list format instead of a long dict format (cfg.compactConnFormat contains list of keys to include)
    elif sim.cfg.compactConnFormat:
        sim.compactConnFormat()

    # remove data structures used to calculate LFP
    if gatherLFP and sim.cfg.recordLFP and hasattr(sim.net, 'compartCells') and sim.cfg.createNEURONObj:
        for cell in sim.net.compartCells:
            try:
                del cell.imembVec
                del cell.imembPtr
                del cell._segCoords
            except:
                pass
        for pop in list(sim.net.pops.values()):
            try:
                del pop._morphSegCoords
            except:
                pass

    # simData entries that hold (dicts of) h.Vector objects, vs. vectors that
    # only need to be kept from a single node (e.g. the time vector 't')
    simDataVecs = ['spkt','spkid','stims', 'dipole']+list(sim.cfg.recordTraces.keys())
    singleNodeVecs = ['t']
    if sim.nhosts > 1:  # only gather if >1 nodes
        netPopsCellGids = {popLabel: list(pop.cellGids) for popLabel,pop in sim.net.pops.items()}

        # gather only sim data
        if getattr(sim.cfg, 'gatherOnlySimData', False):
            nodeData = {'simData': sim.simData}
            data = [None]*sim.nhosts
            data[0] = {}
            for k,v in nodeData.items():
                data[0][k] = v
            # all-to-all exchange: every node sends its payload to rank 0
            gather = sim.pc.py_alltoall(data)
            sim.pc.barrier()
            if sim.rank == 0: # simData
                print(' Gathering only sim data...')
                sim.allSimData = Dict()

                for k in list(gather[0]['simData'].keys()):  # initialize all keys of allSimData dict
                    if gatherLFP and k == 'LFP':
                        sim.allSimData[k] = np.zeros((gather[0]['simData']['LFP'].shape))
                    else:
                        sim.allSimData[k] = {}

                for key in singleNodeVecs:  # store single node vectors (eg. 't')
                    sim.allSimData[key] = list(nodeData['simData'][key])

                # fill in allSimData taking into account if data is dict of h.Vector (code needs improvement to be more generic)
                for node in gather:  # concatenate data from each node
                    for key,val in node['simData'].items():  # update simData dicts of dicts of h.Vector
                        if key in simDataVecs:          # simData dicts that contain Vectors
                            if isinstance(val, dict):
                                for cell,val2 in val.items():
                                    if isinstance(val2,dict):
                                        sim.allSimData[key].update(Dict({cell:Dict()}))
                                        for stim,val3 in val2.items():
                                            sim.allSimData[key][cell].update({stim:list(val3)})  # update simData dicts which are dicts of dicts of Vectors (eg. ['stim']['cell_1']['background']=h.Vector)
                                    else:
                                        sim.allSimData[key].update({cell:list(val2)})  # update simData dicts which are dicts of Vectors (eg. ['v']['cell_1']=h.Vector)
                            else:
                                sim.allSimData[key] = list(sim.allSimData[key])+list(val)  # update simData dicts which are Vectors
                        elif gatherLFP and key == 'LFP':
                            sim.allSimData[key] += np.array(val)
                        elif key not in singleNodeVecs:
                            sim.allSimData[key].update(val)  # update simData dicts which are not Vectors

                if len(sim.allSimData['spkt']) > 0:
                    sim.allSimData['spkt'], sim.allSimData['spkid'] = zip(*sorted(zip(sim.allSimData['spkt'], sim.allSimData['spkid'])))  # sort spks
                    sim.allSimData['spkt'], sim.allSimData['spkid'] = list(sim.allSimData['spkt']), list(sim.allSimData['spkid'])

                sim.net.allPops = ODict()  # pops
                for popLabel,pop in sim.net.pops.items(): sim.net.allPops[popLabel] = pop.__getstate__()  # can't use dict comprehension for OrderedDict

                sim.net.allCells = [c.__dict__ for c in sim.net.cells]

        # gather cells, pops and sim data
        else:
            nodeData = {'netCells': [c.__getstate__() for c in sim.net.cells], 'netPopsCellGids': netPopsCellGids, 'simData': sim.simData}
            data = [None]*sim.nhosts
            data[0] = {}
            for k,v in nodeData.items():
                data[0][k] = v
            #print data
            gather = sim.pc.py_alltoall(data)
            sim.pc.barrier()
            if sim.rank == 0:
                allCells = []
                allPops = ODict()
                for popLabel,pop in sim.net.pops.items(): allPops[popLabel] = pop.__getstate__()  # can't use dict comprehension for OrderedDict
                allPopsCellGids = {popLabel: [] for popLabel in netPopsCellGids}
                sim.allSimData = Dict()

                for k in list(gather[0]['simData'].keys()):  # initialize all keys of allSimData dict
                    if gatherLFP and k == 'LFP':
                        sim.allSimData[k] = np.zeros((gather[0]['simData']['LFP'].shape))
                    else:
                        sim.allSimData[k] = {}

                for key in singleNodeVecs:  # store single node vectors (eg. 't')
                    sim.allSimData[key] = list(nodeData['simData'][key])

                # fill in allSimData taking into account if data is dict of h.Vector (code needs improvement to be more generic)
                for node in gather:  # concatenate data from each node
                    allCells.extend(node['netCells'])  # extend allCells list
                    for popLabel,popCellGids in node['netPopsCellGids'].items():
                        allPopsCellGids[popLabel].extend(popCellGids)
                    for key,val in node['simData'].items():  # update simData dicts of dicts of h.Vector
                        if key in simDataVecs:          # simData dicts that contain Vectors
                            if isinstance(val,dict):
                                for cell,val2 in val.items():
                                    if isinstance(val2,dict):
                                        sim.allSimData[key].update(Dict({cell:Dict()}))
                                        for stim,val3 in val2.items():
                                            sim.allSimData[key][cell].update({stim:list(val3)})  # update simData dicts which are dicts of dicts of Vectors (eg. ['stim']['cell_1']['background']=h.Vector)
                                    else:
                                        sim.allSimData[key].update({cell:list(val2)})  # update simData dicts which are dicts of Vectors (eg. ['v']['cell_1']=h.Vector)
                            else:
                                sim.allSimData[key] = list(sim.allSimData[key])+list(val)  # update simData dicts which are Vectors
                        elif gatherLFP and key == 'LFP':
                            sim.allSimData[key] += np.array(val)
                        elif key not in singleNodeVecs:
                            sim.allSimData[key].update(val)  # update simData dicts which are not Vectors

                if len(sim.allSimData['spkt']) > 0:
                    sim.allSimData['spkt'], sim.allSimData['spkid'] = zip(*sorted(zip(sim.allSimData['spkt'], sim.allSimData['spkid'])))  # sort spks
                    sim.allSimData['spkt'], sim.allSimData['spkid'] = list(sim.allSimData['spkt']), list(sim.allSimData['spkid'])

                sim.net.allCells = sorted(allCells, key=lambda k: k['gid'])

                for popLabel,pop in allPops.items():
                    pop['cellGids'] = sorted(allPopsCellGids[popLabel])
                sim.net.allPops = allPops

        # clean to avoid mem leaks
        for node in gather:
            if node:
                node.clear()
                del node
        for item in data:
            if item:
                item.clear()
                del item

    else:  # if single node, save data in same format as for multiple nodes for consistency
        if sim.cfg.createNEURONObj:
            sim.net.allCells = [Dict(c.__getstate__()) for c in sim.net.cells]
        else:
            sim.net.allCells = [c.__dict__ for c in sim.net.cells]
        sim.net.allPops = ODict()
        for popLabel,pop in sim.net.pops.items(): sim.net.allPops[popLabel] = pop.__getstate__()  # can't use dict comprehension for OrderedDict
        sim.allSimData = Dict()
        for k in list(sim.simData.keys()):  # initialize all keys of allSimData dict
            sim.allSimData[k] = Dict()
        for key,val in sim.simData.items():  # update simData dicts of dicts of h.Vector
            if key in simDataVecs+singleNodeVecs:  # simData dicts that contain Vectors
                if isinstance(val,dict):
                    for cell,val2 in val.items():
                        if isinstance(val2,dict):
                            sim.allSimData[key].update(Dict({cell:Dict()}))
                            for stim,val3 in val2.items():
                                sim.allSimData[key][cell].update({stim:list(val3)})  # update simData dicts which are dicts of dicts of Vectors (eg. ['stim']['cell_1']['background']=h.Vector)
                        else:
                            sim.allSimData[key].update({cell:list(val2)})  # update simData dicts which are dicts of Vectors (eg. ['v']['cell_1']=h.Vector)
                else:
                    sim.allSimData[key] = list(sim.allSimData[key])+list(val)  # update simData dicts which are Vectors
            else:
                sim.allSimData[key] = val  # update simData dicts which are not Vectors

    ## Print statistics
    sim.pc.barrier()
    if sim.rank == 0:
        sim.timing('stop', 'gatherTime')
        if sim.cfg.timing: print((' Done; gather time = %0.2f s.' % sim.timingData['gatherTime']))
        print('\nAnalyzing...')
        sim.totalSpikes = len(sim.allSimData['spkt'])
        sim.totalSynapses = sum([len(cell['conns']) for cell in sim.net.allCells])
        if sim.cfg.createPyStruct:
            if sim.cfg.compactConnFormat:
                preGidIndex = sim.cfg.compactConnFormat.index('preGid') if 'preGid' in sim.cfg.compactConnFormat else 0
                sim.totalConnections = sum([len(set([conn[preGidIndex] for conn in cell['conns']])) for cell in sim.net.allCells])
            else:
                sim.totalConnections = sum([len(set([conn['preGid'] for conn in cell['conns']])) for cell in sim.net.allCells])
        else:
            sim.totalConnections = sim.totalSynapses
        sim.numCells = len(sim.net.allCells)

        if sim.totalSpikes > 0:
            sim.firingRate = float(sim.totalSpikes)/sim.numCells/sim.cfg.duration*1e3 # Calculate firing rate
        else:
            sim.firingRate = 0
        if sim.numCells > 0:
            sim.connsPerCell = sim.totalConnections/float(sim.numCells) # Calculate the number of connections per cell
            sim.synsPerCell = sim.totalSynapses/float(sim.numCells) # Calculate the number of connections per cell
        else:
            sim.connsPerCell = 0
            sim.synsPerCell = 0

        print((' Cells: %i' % (sim.numCells) ))
        print((' Connections: %i (%0.2f per cell)' % (sim.totalConnections, sim.connsPerCell)))
        if sim.totalSynapses != sim.totalConnections:
            print((' Synaptic contacts: %i (%0.2f per cell)' % (sim.totalSynapses, sim.synsPerCell)))
        if 'runTime' in sim.timingData:
            print((' Spikes: %i (%0.2f Hz)' % (sim.totalSpikes, sim.firingRate)))
            if sim.cfg.printPopAvgRates and not sim.cfg.gatherOnlySimData:
                trange = sim.cfg.printPopAvgRates if isinstance(sim.cfg.printPopAvgRates,list) else None
                sim.allSimData['popRates'] = sim.analysis.popAvgRates(trange=trange)
            print((' Simulated time: %0.1f s; %i workers' % (sim.cfg.duration/1e3, sim.nhosts)))
            print((' Run time: %0.2f s' % (sim.timingData['runTime'])))
        sim.allSimData['avgRate'] = sim.firingRate # save firing rate

    return sim.allSimData
#------------------------------------------------------------------------------
# Gathers simData from files
#------------------------------------------------------------------------------
def fileGather (gatherLFP = True):
import os, pickle
from .. import sim
sim.timing('start', 'gatherTime')
# iterate through the saved files and concat their data
fileData = Dict()
if sim.rank == 0:
for f in os.listdir('temp'):
with open('temp/' + f, 'rb') as data:
temp = pickle.load(data)
for k in temp.keys():
if k in fileData:
if isinstance(temp[k], list):
fileData[k] = fileData[k] + temp[k]
elif isinstance(temp[k], dict):
fileData[k].update(temp[k])
else:
fileData[k] = temp[k]
simDataVecs = ['spkt','spkid','stims']+list(sim.cfg.recordTraces.keys())
singleNodeVecs = ['t']
if sim.rank == 0:
sim.allSimData = Dict()
sim.allSimData.update(fileData)
if len(sim.allSimData['spkt']) > 0:
sim.allSimData['spkt'], sim.allSimData['spkid'] = zip(*sorted(zip(sim.allSimData['spkt'], sim.allSimData['spkid']))) # sort spks
sim.allSimData['spkt'], sim.allSimData['spkid'] = list(sim.allSimData['spkt']), list(sim.allSimData['spkid'])
# 1 get the right data, now check that we have right amount
# 2 use that data rather than gathering later
## Pack data from all hosts
if sim.rank==0:
print('\nGathering data from files...')
# | |
<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__name__ = "Phoniebox"
import configparser # needed only for the exception types ?!
from ConfigParserExtended import ConfigParserExtended
import codecs
import subprocess # needed for aplay call
import os,sys
from time import sleep
from mpd import MPDClient
# get absolute path of this script
dir_path = os.path.dirname(os.path.realpath(__file__))
defaultconfigFilePath = os.path.join(dir_path,'./phoniebox.conf')
# TODO: externalize helper functions for the package. How?
def is_int(s):
    """Return True if *s* parses as a base-10 integer, False otherwise."""
    try:
        int(s)
    except ValueError:
        return False
    return True
def str2bool(s):
    """Convert a string to a python boolean (case-insensitive yes/true/t/1)."""
    return s.lower() in {"yes", "true", "t", "1"}
def str2num(s):
    """Convert a string to an int when possible, otherwise to a float.

    Raises ValueError if *s* is neither.
    """
    try:
        value = int(s)
    except ValueError:
        value = float(s)
    return value
def find_modified_files(path,since):
    """Recursively collect files under *path* modified after timestamp *since*.

    Args:
        path: directory to walk recursively.
        since: POSIX timestamp; files with st_mtime strictly greater are kept.

    Returns:
        List of file paths (rooted at *path*).
    """
    modified_files = []
    for root, dirs, files in os.walk(path):
        for basename in files:
            # BUG FIX: join with the directory currently being walked (root),
            # not the top-level path — otherwise files in subdirectories get
            # non-existent paths and os.stat raises FileNotFoundError.
            filename = os.path.join(root, basename)
            status = os.stat(filename)
            if status.st_mtime > since:
                modified_files.append(filename)
    return modified_files
def file_modified(filename,since):
    """Return True if *filename* was modified after timestamp *since*."""
    return os.stat(filename).st_mtime > since
class Phoniebox(object):
    def __init__(self,configFilePath=defaultconfigFilePath):
        """Load the phoniebox config file and the RFID card assignments.

        configFilePath: path to the phoniebox .conf file (INI format).
        """
        print("Using configuration file {}".format(configFilePath))
        self.read_config(configFilePath)
        # read cardAssignments from given card assignments file
        # NOTE(review): card_assignments_file is fetched but never passed to
        # read_cardAssignments() — confirm the helper reads it from config.
        card_assignments_file = self.get_setting("phoniebox","card_assignments_file")
        self.cardAssignments = self.read_cardAssignments()
        # optionally merge legacy per-folder folder.conf assignments
        if self.get_setting("phoniebox","translate_legacy_cardassignments","bool") == True:
            self.log("Translating legacy cardAssignment config from folder.conf files.",3)
            legacy_cardAssignments = self.translate_legacy_cardAssignments()
            self.update_cardAssignments(legacy_cardAssignments)
def log(self,msg,level=3):
""" level based logging to stdout """
log_level_map = {0:None,1:"error",2:"warning",3:"info",4:"extended",5:"debug"}
log_level = int(self.get_setting("phoniebox","log_level"))
if log_level >= level and log_level != -1:
print("{}: {}".format(log_level_map[level].upper(),msg))
    def mpd_init_connection(self):
        """ connect to mpd

        Reads host/port/timeout from the [mpd] config section, falling back
        to localhost:6600 with a 3s timeout (get_setting returns -1 when a
        key is missing). Terminates the process if MPD is unreachable.
        """
        host = self.get_setting("mpd","host")
        if host == -1:
            host = "localhost"
        port = self.get_setting("mpd","port")
        if port == -1:
            port = 6600
        timeout = self.get_setting("mpd","timeout")
        if timeout == -1:
            timeout = 3
        self.client = MPDClient()
        self.client.host = host
        self.client.port = port
        self.client.timeout = timeout
        #ret = self.mpd_connect_timeout()
        # NOTE(review): exits the whole process on connection failure rather
        # than raising — confirm this is the intended daemon behavior.
        if self.mpd_connect_timeout() != 0:
            sys.exit()
        else:
            self.log("connected to MPD with settings host = {}, port = {}, timeout = {}".format(host,port,timeout),3)
    def mpd_connect_timeout(self):
        """ establishes the connection to MPD when disconnected

        Retries every 0.2s until connected or self.client.timeout seconds
        have elapsed. Returns 0 on success, 1 on failure.
        """
        success = False
        runtime = 0
        try:
            # drop any stale connection; python-mpd raises if none exists
            self.client.disconnect()
        except:
            pass
        while success != True and runtime <= self.client.timeout:
            try:
                self.client.connect(self.client.host,self.client.port)
                success = True
                self.log("Connected to MPD at {} on port {}.".format(self.client.host,self.client.port),5)
                return 0
            except:
                # connection refused or still starting up -- back off briefly
                self.log("Could not connect to MPD, retrying.",5)
                sleep(0.2)
                runtime += 0.2
                if runtime >= self.client.timeout:
                    self.log("Could not connect to MPD for {}s, giving up.".format(self.client.timeout),2)
                    return 1
def do_second_swipe(self):
""" react to the second swipe of the same card according to settings"""
second_swipe_map = { 'default': self.do_restart_playlist,
'restart': self.do_restart_playlist,
'restart_track':self.do_restart_track,
'stop': self.do_stop,
'pause': self.do_toggle,
'noaudioplay': self.do_pass,
'skipnext': self.do_next,
}
setting_key = "second_swipe"
map_key = self.config.get("phoniebox",setting_key)
try:
second_swipe_map[map_key]()
except KeyError as e:
self.log("Unknown setting \"{} = {}\", using \"{} = default\".".format(setting_key,map_key,setting_key),5)
second_swipe_map['default']()
    def do_restart_playlist(self):
        """ restart the same playlist from the beginning

        Uses self.lastplayedID (the card swiped last) to restore play mode
        and reload that card's URI.
        """
        # TODO: Any reason not to just start the first item in the current playlist?
        self.mpd_connect_timeout()
        self.set_mpd_playmode(self.lastplayedID)
        self.play_mpd(self.get_cardsetting(self.lastplayedID,"uri"))
    def do_restart_track(self):
        """ restart currently playing track """
        self.mpd_connect_timeout()
        mpd_status = self.client.status()
        self.set_mpd_playmode(self.lastplayedID)
        # restart current track
        # NOTE(review): MPD only reports 'song' while playing/paused —
        # confirm this is never reached in the stopped state.
        self.client.play(mpd_status['song'])
    def do_start_playlist(self,cardid):
        """ restart the same playlist, eventually resume

        Saves the resume position of the previously played card (if its
        resume option is set), loads *cardid*'s playlist and optionally
        resumes it, then records *cardid* as the last played card.
        """
        if self.get_cardsetting(self.lastplayedID,"resume"):
            self.resume(self.lastplayedID,"save")
        self.mpd_connect_timeout()
        self.set_mpd_playmode(cardid)
        self.play_mpd(self.get_cardsetting(cardid,"uri"))
        if self.get_cardsetting(cardid,"resume"):
            self.resume(cardid,"resume")
        self.lastplayedID = cardid
def do_toggle(self):
""" toggle play/pause """
self.mpd_connect_timeout()
status = self.client.status()
if status['state'] == "play":
self.client.pause()
else:
self.client.play()
    def do_pass(self):
        """ do nothing (on second swipe with noaudioplay) """
        # Intentional no-op: mapped from second_swipe = noaudioplay.
        pass
def do_next(self):
""" skip to next track or restart playlist if stopped (on second swipe with noaudioplay) """
self.mpd_connect_timeout()
status = self.client.status()
# start playlist if in stop state or there is only one song in the playlist (virtually loop)
if (status["state"] == "stop") or (status["playlistlength"] == "1"):
self.do_restart_playlist()
else:
self.client.next()
    def do_stop(self):
        """ stop mpd playback (on second swipe with second_swipe = stop) """
        self.mpd_connect_timeout()
        self.client.stop()
def play_alsa(self,audiofile):
""" pause mpd and play file on alsa player """
self.mpd_connect_timeout()
self.client.pause()
# TODO: use the standard audio device or set them via phoniebox.conf
subprocess.call(["aplay -q -Dsysdefault:CARD=sndrpijustboomd " + audiofile], shell=True)
subprocess.call(["aplay -q -Dsysdefault " + audiofile], shell=True)
def play_mpd(self,uri):
""" play uri in mpd """
self.mpd_connect_timeout()
self.client.clear()
self.client.add(uri)
self.client.play()
self.log("phoniebox: playing {}".format(uri.encode('utf-8')),3)
# TODO: is there a better way to check for "value not present" than to return -1?
def get_setting(self,section,key,opt_type="string"):
""" get a setting from configFile file or cardAssignmentsFile
if not present, return -1
"""
try:
num = str2num(section)
parser = self.cardAssignments
except ValueError:
parser = self.config
try:
opt = parser.get(section,key)
except configparser.NoOptionError:
print("No option {} in section {}".format(key,section))
return -1
except configparser.NoSectionError:
print("No section {}".format(section))
return -1
if "bool" in opt_type.lower():
return str2bool(opt)
else:
try:
return str2num(opt)
except ValueError:
return opt
def get_cardsetting(self,cardid,key,opt_type="string"):
""" catches Errors """
return self.get_setting(cardid,key,opt_type)
def mpd_init_settings(self):
""" set initial mpd state:
max_volume
initial_volume """
mpd_status = self.client.status()
max_volume = self.get_setting("phoniebox","max_volume")
init_volume = self.get_setting("phoniebox","init_volume")
if max_volume == -1:
max_volume = 100 # the absolute max_volume is 100%
if init_volume == -1:
init_volume = 0 # to be able to compare
if max_volume < init_volume:
self.log("init_volume cannot exceed max_volume.",2)
init_volume = max_volume # do not exceed max_volume
if mpd_status["volume"] > max_volume:
self.client.setvol(init_volume)
    def set_mpd_playmode(self,cardid):
        """Apply the card's playmode options (repeat/random/single/consume)
        to mpd.

        Each option is enabled (1) when the card section contains it with
        value 1, or when get_cardsetting reports -1; otherwise the default
        0 is applied.  NOTE(review): -1 means "option not present" for
        get_setting — treating absent as enabled looks intentional for
        valueless "option" entries, but confirm against the config format.

        :param cardid: card id, i.e. section name in cardAssignments
        """
        playmode_defaults_map = {"repeat":0,"random":0,"single":0,"consume":0}
        # dispatch table: option name -> mpd client setter
        set_playmode_map = { "repeat":self.client.repeat,
                "random":self.client.random,
                "single":self.client.single,
                "consume":self.client.consume }
        for key in set_playmode_map.keys():
            # option is set if config file contains "option = 1" or just "option" without value.
            playmode_setting = self.get_cardsetting(cardid,key)
            if playmode_setting == -1 or playmode_setting == 1:
                playmode_setting = 1
            else:
                playmode_setting = playmode_defaults_map[key]
            # set value
            set_playmode_map[key](playmode_setting)
            self.log("setting mpd {} = {}".format(key,playmode_setting),5)
    def resume(self,cardid,action="resume"):
        """Seek to, or store, the playback position for a card.

        action in ("resume", "restore"): seek to the saved song/elapsed time,
        but only when the card's "resume" option is enabled (or reported as
        -1, i.e. not present).
        action in ("save", "store"): write the current song and elapsed time
        from mpd back into cardAssignments.

        :param cardid: card id, i.e. section name in cardAssignments
        :param action: one of "resume"/"restore" or "save"/"store"
        """
        self.mpd_connect_timeout()
        mpd_status = self.client.status()
        print(mpd_status)
        if action in ["resume","restore"]:
            opt_resume = self.get_cardsetting(cardid,"resume")
            # -1 means "option not present"; missing is treated as enabled
            if opt_resume == -1 or opt_resume == 1:
                resume_elapsed = self.get_cardsetting(cardid,"resume_elapsed")
                resume_song = self.get_cardsetting(cardid,"resume_song")
                if resume_song == -1:
                    resume_song = 0  # default to the first track
                # skip the seek when no position was stored (or it is 0)
                if resume_elapsed != -1 and resume_elapsed != 0:
                    self.log("{}: resume song {} at time {}s".format(cardid,
                            self.get_cardsetting(cardid,"resume_song"),
                            self.get_cardsetting(cardid,"resume_elapsed")),5)
                    self.client.seek(resume_song,resume_elapsed)
        elif action in ["save","store"]:
            try:
                self.log("{}: save state, song {} at time {}s".format(cardid,
                        mpd_status["song"],mpd_status["elapsed"]),5)
                self.cardAssignments.set(cardid,"resume_elapsed",
                        mpd_status["elapsed"])
                self.cardAssignments.set(cardid,"resume_song",
                        mpd_status["song"])
            # mpd status may lack "song"/"elapsed" (e.g. while stopped)
            except KeyError as e:
                print("KeyError: {}".format(e))
            except ValueError as e:
                print("ValueError: {}".format(e))
def read_cardAssignments(self):
card_assignments_file = self.config.get("phoniebox","card_assignments_file")
parser = ConfigParserExtended(allow_no_value=True)
dataset = parser.read(card_assignments_file)
if len(dataset) != 1:
raise ValueError("Config file {} not found!".format(card_assignments_file))
return parser
    def update_cardAssignments(self,static_cardAssignments):
        """Replace self.cardAssignments with *static_cardAssignments*, while
        carrying over the runtime-only resume state (resume_song /
        resume_elapsed) of every card section present in both the old and
        the new assignments.

        :param static_cardAssignments: freshly parsed card assignments parser
        """
        self.log("Updating changes in cardAssignments from disk.",3)
        keep_cardsettings = ["resume_song","resume_elapsed"]
        # only sections present in both parsers can carry their state over
        common_sections = list(set(static_cardAssignments.sections()).intersection(self.cardAssignments.sections()))
        for section in common_sections:
            for option in keep_cardsettings:
                if self.cardAssignments.has_option(section,option):
                    value = self.cardAssignments.get(section,option)
                    static_cardAssignments.set(section,option,value)
                    self.log("Updating cardid {} with \"{} = {}\".".format(section,option,value),5)
        # finally assign new values
        self.cardAssignments = static_cardAssignments
def read_config(self,configFilePath=defaultconfigFilePath):
""" read config variables from file """
configParser = ConfigParserExtended(allow_no_value=True,interpolation=configparser.BasicInterpolation())
dataset = configParser.read(configFilePath)
if len(dataset) != 1:
raise ValueError("Config file {} not found!".format(configFilePath))
self.config = configParser
def translate_legacy_cardAssignments(self,last_translate_legacy_cardAssignments=0):
""" reads the card settings data from the old scheme an translates them """
shortcuts_path = self.get_setting("phoniebox","shortcuts_path")
audiofolders_path = self.get_setting("phoniebox","audiofolders_path")
if shortcuts_path != -1:
configParser = ConfigParserExtended()
shortcut_files = [f for f in os.listdir(shortcuts_path) if os.path.isfile(os.path.join(shortcuts_path,f)) and is_int(f)]
# filename is the cardid
for filename in shortcut_files:
with open(os.path.join(shortcuts_path,filename)) as f:
uri = f.readline().strip().decode('utf-8')
# add default settings
if not filename in configParser.sections():
self.log("Adding section {} to cardAssignments".format(filename),5)
configParser.add_section(filename)
configParser[filename] = self.config["default_cardsettings"]
configParser.set(filename,"cardid",filename)
configParser.set(filename,"uri",uri)
# translate and add folder.conf settings if they contradict default_cardsettings
cardsettings_map = {"CURRENTFILENAME":None,
"ELAPSED":"resume_elapsed",
"PLAYSTATUS":None,
"RESUME":"resume",
"SHUFFLE":"random",
"LOOP":"repeat"}
folderconf = os.path.join(audiofolders_path,uri,"folder.conf")
if os.path.isfile(folderconf) and file_modified(folderconf,last_translate_legacy_cardAssignments):
with open(folderconf) as f:
lines = f.readlines()
cardsettings_old = dict([l.strip().replace('"','').split("=") for l in lines])
for key in cardsettings_old.keys():
if cardsettings_map[key] != None:
# ignore 0 and OFF values, drop settings that have None in cardsettings_map
if key != "ELAPSED":
if cardsettings_old[key] != "0" and cardsettings_old[key] != "OFF":
configParser.set(filename,cardsettings_map[key],"1")
else:
configParser.set(filename,cardsettings_map[key],"0")
else:
try:
elapsed_val = float(cardsettings_old[key])
except ValueError:
elaped_val = 0
configParser.set(filename,cardsettings_map[key],str(elapsed_val))
return configParser
def write_new_cardAssignments(self):
""" updates the cardsettings with according to playstate """
card_assignments_file = self.config.get("phoniebox","card_assignments_file")
self.log("Write new card assignments to file | |
College, Columbia University
##Trocaire College
##U.S. Military Academy
##Union College
##University of Rochester
##University of Stuyvesant
##Utica College of Syracuse University
##Vassar College
##Wagner College
##Webb Institute
##Westchester Business Institute
##Yeshiva University
##Appalachian State University
##Art Institute of Charlotte
##Belmont Abbey College
##Bennett College
##Campbell University
##Catawba College
##Chowan College
##College of the Albemarle
##Davidson College
##Duke University
##East Carolina University
##Elizabeth City State University
##Elon College
##Fayetteville State University
##Gardner-Webb University
##Greensboro College
##Guilford College
##High Point University
##<NAME> University
##Lenoir-Rhyne College
##Meredith College
##Montreat College
##Mount Olive College
##North Carolina A&T State University
##North Carolina Central University
##North Carolina School of the Arts
##North Carolina State University
##Piedmont Baptist College
##Saint Augustine's College
##Salem College
##University of North Carolina
##University of North Carolina - Asheville
##University of North Carolina - Chapel Hill
##University of North Carolina - Charlotte
##University of North Carolina - Greensboro
##University of North Carolina - Pembroke
##University of North Carolina - Wilmington
##Wake Forest University
##Warren Wilson College
##Western Carolina University
##Wingate College
##Winston-Salem State University
##Dickinson State University
##Minot State University
##North Dakota State University
##North Dakota State University - Bottineau
##North Dakota State University - Fargo
##University of North Dakota
##Valley City State University
##Williston State College
##Air Force Institute of Technology
##Antioch College
##Antonelli College
##Art Academy of Cincinnati
##Ashland University
##Baldwin-Wallace College
##Bluffton College
##Bowling Green State University
##Capital University
##Case Western Reserve University
##Cedarville College
##Central State University
##Circleville Bible College
##Cleveland Institute of Art
##Cleveland Institute of Music
##Cleveland State University
##College of Mount St. Joseph
##Columbus State Community College
##David N. Myers College
##Denison University
##DeVRY Institute Of Technology
##Edison State Community College
##Franklin University
##Franciscan University of Steubenville
##Heidelberg College
##Hiram College
##John Carroll University
##Kent State University
##Kent State University - Trumbull
##Kenyon College
##Kettering College of Medical Arts
##Lima Technical College
##Malone College
##Marietta College
##Miami - Jacobs College
##Miami University of Ohio
##Mount Carmel College of Nursing
##Mount Union College
##Mount Vernon Nazarene College
##Muskingum College
##Notre Dame College of Ohio
##Oberlin College
##Ohio Dominican College
##Ohio Northern University
##Ohio State University
##Ohio University
##Ohio University - Lancaster
##Ohio University - Zanesville
##Ohio Wesleyan University
##Shawnee University
##Tiffin University
##University of Akron
##University of Cincinnati
##University of Dayton
##University of Findlay
##University of Toledo
##Ursuline College
##Wilberforce University
##Wilmington College
##Wittenberg University
##Wooster College
##Wright State University
##Xavier University
##Youngstown State University
##Cameron University
##East Central University
##Langston University
##Metropolitan College
##Northeast State University
##Oklahoma Baptist University
##Oklahoma Christian University of Science and Art
##Oklahoma City University
##Oklahoma State University
##Oral Roberts University
##Rogers State University
##St. Gregory's University
##Southeastern Oklahoma State University
##Southern Nazarene University
##Spartan School of Aeronautics
##University of Central Oklahoma
##University of Oklahoma
##University of Oklahoma Health Sciences Center
##University of Science & Arts of Oklahoma
##University of Tulsa
##Art Institute of Portland
##Concordia College
##Eastern Oregon University
##George Fox University
##Lewis and Clark College
##Linfield College
##Marylhurst University
##Mount Angel Abbey and Seminary
##Northwest Christian College
##Oregon Graduate Institute
##Oregon Health Sciences University
##Oregon Institute of Technology
##Oregon State University
##Pacific University in Oregon
##Pacific Northwest College of the Arts
##Portland Community College
##Portland State University
##Reed College
##Southern Oregon University
##University of Oregon
##University of Portland
##Warner Pacific College
##Western Baptist College
##Western Oregon University
##Western States Chiropractic College
##Willamette University
##Albright College
##Allegheny College
##Allentown College
##Arcadia University
##Art Institute of Philadelphia
##Art Institute of Pittsburgh
##Bloomsburg University
##Bucknell University
##Bucks College
##Bryn Mawr College
##California University of Pennsylvania
##Carnegie Mellon University
##Chatham College
##Chestnut Hill College
##Cheyney State University
##Clarion University
##College Misericordia
##Delaware Valley College
##DeSales University
##Dickinson College
##Drexel University
##Duquesne University
##Eastern College
##East Stroudsburg University
##Edinboro University of Pennsylvania
##Elizabethtown College
##Franklin & Marshall College
##Gannon University
##Geneva College
##Gettysburg College
##Grove City College
##Haverford College
##Indiana University of Pennsylvania
##Juniata College
##Keystone College
##King's College
##Kutztown University of Pennsylvania
##La Salle University
##Lafayette College
##Lebanon Valley College
##Lehigh University
##Lincoln University of Pennsylvania
##Lock Haven University
##Lycoming College
##MCP Hahnemann University
##Marywood College
##Mercyhurst College
##Messiah College
##Millersville University of Pennsylvania
##Moravian College
##Muhlenberg College
##Peirce College
##Pennsylvania Academy of Fine Arts
##Pennsylvania College of Optometry
##Pennsylvania College of Technology
##Pennsylvania State University
##Philadelphia Biblical University
##Philadelphia University
##Point Park College
##Saint Joseph's University
##Saint Vincent College
##Shippensburg University
##Slippery Rock University
##Susquehanna University
##Swarthmore College
##Temple University
##Thiel College
##Thomas Jefferson University
##University of the Arts
##University of Pennsylvania
##University of Pittsburgh
##University of Pittsburgh at Johnstown
##University of Scranton
##University of the Arts
##University of the Sciences in Philadelphia
##Ursinus College
##Valley Forge Christian College
##Villanova University
##Washington and Jefferson College
##Waynesburg College
##West Chester University
##Westminster College
##Widener University
##Wilkes University
##York College of Pennsylvania
##Colegio Universitario Tecnologico de Bayamon
##Interamerican University of Puerto Rico
##Polytechnic University of Puerto Rico
##Pontifical Catholic University of Puerto Rico
##Sacred Heart University
##Universidad Central de Bayamón
##University of Puerto Rico
##University of Puerto Rico - Aguadilla
##University of Puerto Rico - Mayagüez
##University of Puerto Rico - Rio Piedras
##Brown University
##Bryant College
##Fraunhofer Center for Research in Computer Graphics
##Johnson & Wales University
##Providence College
##Rhode Island School of Design
##Roger Williams University
##Salve Regina University
##University of Rhode Island
##Allen University
##Anderson College
##Bob Jones University
##Charleston Southern University
##The Citadel
##Claflin College
##Clemson University
##Coastal Carolina University
##College of Charleston
##Columbia College
##Erskine College
##Francis Marion University
##Furman University
##Independent Colleges and Universities of South Carolina
##Medical University of South Carolina
##Morris College
##Presbyterian College
##South Carolina State University
##Southern Wesleyan University
##Trident Technical College
##University of South Carolina
##University of South Carolina - Aiken
##University of South Carolina - Beaufort
##University of South Carolina - Columbia
##University of South Carolina - Spartanburg
##Voorhees College
##Winthrop University
##Wofford College
##Augustana College
##Dakota State University
##Northern State University
##South Dakota School of Mines and Technology
##South Dakota State University
##Stanton University
##University for Professional Studies
##University of Sioux Falls
##University of South Dakota
##Austin Peay State University
##Belmont University
##Carson-Newman College
##Christian Brothers University
##East Tennessee State University
##Fisk University
##Freed-Hardeman University
##Harding University Graduate School of Religion
##Knoxville College
##Lambuth University
##Lee University
##Lincoln Memorial University
##Lipscomb University
##Meharry Medical College
##Middle Tennessee State University
##Rhodes College
##Roane State Community College
##Sewanee, The University of the South
##Southern Adventist University
##Tennessee State University
##Tennessee Technological University
##Tennessee Temple University
##Trevecca Nazarene University
##Tusculum College
##Union University
##University of Memphis
##University of Tennessee
##University of Tennessee - Chattanooga
##University of Tennessee - Knoxville
##University of Tennessee - Martin
##University of Tennessee - Memphis
##University of Tennessee Space Institute
##Vanderbilt University
##Walters State Community College
##Abilene Christian University
##Angelo State University
##Art Institute of Dallas
##Art Institute of Houston
##Austin College
##Baylor University
##Bee County College
##Blinn College
##College of the Mainland
##Collin County Community College District
##Concordia College
##Dallas Baptist University
##Dallas County Community College District
##El Paso Community College
##Houston Community College System
##Huston-Tillotson College
##Lamar University
##LeTourneau University
##Lubbock Christian University
##McMurry University
##Midwestern State University
##Navarro College
##North Harris Montgomery Community College District
##Our Lady of the Lake University
##Paris Junior College
##Prairie View A&M University
##Rice University
##Saint Edward's University
##Saint Mary's University of San Antonio
##Sam Houston State University
##San Antonio College
##Schreiner College
##South Texas College of Law
##Southern Methodist University
##Southwestern University
##Southwest Texas State University
##Stephen F. Austin State University
##Sul Ross State University
##Tarleton State University
##Tarrant County College
##Texas A&M University
##Texas A&M University - Commerce
##Texas A&M University - <NAME>
##Texas A&M University - Galveston
##Texas A&M University - Kingsville
##Texas A&M University - Texarkana
##Texas Christian University
##Texas Lutheran University
##Texas Southern University
##Texas State Technical College - Harlingen
##Texas State Technical College at Waco
##Texas Tech University
##Texas Tech University Health Sciences Center
##Texas Wesleyan University
##Texas Woman's University
##Trinity University
##University of Dallas
##University of Houston
##University of Mary Hardin-Baylor
##University of North Texas
##University of St. Thomas
##University of Texas - Austin
##University of Texas - Arlington
##University of Texas - Brownsville
##University of Texas - Dallas
##University of Texas - El Paso
##University of Texas - Houston
##University of Texas - Pan American
##University of Texas - San Antonio
##University of Texas - TeleCampus
##University of Texas - Tyler
##University of Texas Medical Branch - Galveston
##University of Texas Southwestern Medical Center at Dallas
##University of the Incarnate Word
##Wayland Baptist University
##West Texas A&M University
##Westwood College
##Wiley College
##Brigham Young University
##College of Eastern Utah
##Dixie College
##Hawthorne University
##LDS Business College
##Salt Lake Community College
##Southern Utah University
##Snow College
##University of Phoenix
##University of Utah
##Utah State University
##Utah Valley State College
##Weber State University
##Westminster College of Salt Lake City
##Bennington College
##Castleton State College
##Green Mountain College
##Johnson State College
##Lyndon State College of Vermont
##Marlboro College
##Middlebury College
##Norwich University
##Saint Michael's University
##University of Vermont
##Vermont Technical College
##American Open University
##Art Institute of Washington
##Bridgewater College
##Christopher Newport University
##ECPI College of Technology
##Eastern Mennonite University
##George Mason University
##Germanna Community College
##Hampden-Sydney College
##Hampton University
##Hollins College
##Illawarra College
##James Madison University
##Liberty University
##Longwood College
##Lynchburg College
##Mary Baldwin College
##Mary Washington College
##Marymount University
##Norfolk State University
##Old Dominion University
##Radford University
##Randolph-Macon College
##Randolph-Macon Woman's College
##Regent University
##Roanoke College
##Saint Paul's College
##Shenandoah University
##Strayer University
##Sweet Briar College
##University of Richmond
##University of Virginia
##University of Virginia's College at Wise
##Virginia Commonwealth University
##Virginia International University
##Virginia Military Institute
##Virginia State University
##Virginia Tech
##Virginia Wesleyan College
##Washington & Lee University
##William & Mary
##Antioch University Seattle
##Art Institute of Seattle
##Central Washington University
##City University
##Eastern Washington University
##Evergreen State College
##Gonzaga University
##Henry Cogswell College
##Heritage College
##Northwest College of Art
##Pacific Lutheran University
##Saint Martin's College
##Seattle Pacific University
##Seattle University
##University of Puget Sound
##University of Washington
##Vancouver University Colleges World Wide
##Walla Walla College
##Washington State University
##Washington State University at Tri-Cities
##Washington State University Spokane
##Western Washington University
##Whitman College
##Whitworth College
##American InterContinental University
##American University
##Catholic University of America
##Gallaudet University
##George Washington University
##Georgetown University
##Howard University
##Southeastern University
##Strayer University
##Trinity College
##University of the District of Columbia
##Alderson-Broaddus College
##Bethany College
##Bluefield State College
##Concord College
##Davis and Elkins College
##Fairmont State College
##Glenville State College
##Marshall University
##Salem-Teikyo University
##Shepherd College
##University of Charleston
##West Liberty State College
##Wheeling Jesuit University
##West Virginia State College
##West Virginia University
##West Virginia Wesleyan College
##Alverno College
##Cardinal Stritch University
##Carroll College
##Carthage College
##Concordia University
##Edgewood College
##Lakeland College
##Lawrence University
##Marian College
##Marquette University
##Milwaukee Institute of Art & Design
##Milwaukee School of Engineering
##Mount Mary College
##Mount Senario College
##Northland College
##Ripon College
##Saint Norbert College
##University of Wisconsin - Eau Claire
##University of Wisconsin - Green Bay
##University of Wisconsin - La Crosse
##University of Wisconsin - Madison
##University of Wisconsin - Milwaukee
##University of Wisconsin - Oshkosh
##University of Wisconsin - Parkside
##University of Wisconsin - Platteville
##University of Wisconsin - River Falls
##University of Wisconsin - Stevens Point
##University | |
from .base import BaseClient, api_call
from launchkey.utils import iso_format
from launchkey.entities.validation import DirectoryGetDeviceResponseValidator, DirectoryGetSessionsValidator, \
DirectoryUserDeviceLinkResponseValidator, ServiceValidator, ServiceSecurityPolicyValidator, PublicKeyValidator
from launchkey.entities.service import Service, ServiceSecurityPolicy
from launchkey.entities.directory import Session, DirectoryUserDeviceLinkData, Device
from launchkey.entities.shared import PublicKey
class DirectoryClient(BaseClient):
    def __init__(self, subject_id, transport):
        """Create a client whose requests act as the Directory *subject_id*.

        :param subject_id: unique ID of the Directory entity
        :param transport: transport object used to issue the API requests
        """
        super(DirectoryClient, self).__init__('dir', subject_id, transport)
@api_call
def link_device(self, user_id):
"""
Begin the process of Linking a Subscriber Authenticator Device with an End User based on the Directory User ID.
If no Directory User exists for the Directory User ID, the Directory User will be created.
:param user_id: Unique value identifying the End User in the your system. It is the permanent link for the End
User between the your application(s) and the LaunchKey API. This will be used for authorization requests as
well as managing the End User's Devices.
:raise: launchkey.exceptions.InvalidParameters - Input parameters were not correct
:raise: launchkey.exceptions.InvalidDirectoryIdentifier - Input identifier is invalid.
:return: launchkey.entities.directory.DirectoryUserDeviceLinkData - Contains data needed to complete the
linking process
"""
response = self._transport.post("/directory/v3/devices", self._subject, identifier=user_id)
data = self._validate_response(response, DirectoryUserDeviceLinkResponseValidator)
return DirectoryUserDeviceLinkData(data)
@api_call
def get_linked_devices(self, user_id):
"""
Get a list of Subscriber Authenticator Devices for a Directory User. If not Directory User exists for the
Directory User ID, an empty list will be returned.
:param user_id: Unique value identifying the End User in the your system. This value was used to create the
Directory User and Link Device.
:raise: launchkey.exceptions.InvalidParameters - Input parameters were not correct
:return: List - An list of launchkey.entities.directory.Device objects for the specified user identifier.
"""
return [Device(self._validate_response(d, DirectoryGetDeviceResponseValidator)) for d in
self._transport.post("/directory/v3/devices/list", self._subject, identifier=user_id).data]
@api_call
def unlink_device(self, user_id, device_id):
"""
Unlink a users device
:param user_id: Unique value identifying the End User in the your system. This value was used to create the
Directory User and Link Device.
:param device_id: The unique identifier of the Device you wish to Unlink. It would be obtained via Device.id
returned by get_linked_devices().
:raise: launchkey.exceptions.InvalidParameters - Input parameters were not correct
:raise: launchkey.exceptions.EntityNotFound - The input device was not found. It may already be unlinked.
"""
self._transport.delete("/directory/v3/devices", self._subject, identifier=user_id, device_id=str(device_id))
@api_call
def end_all_service_sessions(self, user_id):
"""
End Service User Sessions for all Services in which a Session was started for the Directory User
:param user_id: Unique value identifying the End User in your system. This value was used to create the
Directory User and Link Device.
:raise: launchkey.exceptions.InvalidParameters - Input parameters were not correct
:raise: launchkey.exceptions.EntityNotFound - The user was not found.
"""
self._transport.delete("/directory/v3/sessions", self._subject, identifier=user_id)
@api_call
def get_all_service_sessions(self, user_id):
"""
Retrieves all Service Sessions that belong to a User
:param user_id: Unique value identifying the End User in your system. This value was used to create the
Directory User and Link Device.
:raise: launchkey.exceptions.EntityNotFound - The input user identifier does not exist in your directory, or
it does not have any devices linked to it
:return: List - launchkey.entities.directory.Session
"""
return [Session(self._validate_response(session, DirectoryGetSessionsValidator)) for session in
self._transport.post("/directory/v3/sessions/list", self._subject, identifier=user_id).data]
@api_call
def create_service(self, name, description=None, icon=None, callback_url=None, active=True):
"""
Creates a Directory Service
:param name: Unique name that will be displayed in an Auth Request
:param description: Optional description that can be viewed in the Admin Center or when retrieving the Service.
:param icon: Optional URL to an icon that will be displayed in an Auth Request
:param callback_url: URL that Webhooks will be sent to
:param active: Whether the Service should be able to send Auth Requests
:raise: launchkey.exceptions.InvalidParameters - Input parameters were not correct
:raise: launchkey.exceptions.ServiceNameTaken - Service name already taken
:return: String - ID of the Service that is created
"""
return self._transport.post("/directory/v3/services", self._subject, name=name, description=description,
icon=icon, callback_url=callback_url, active=active).data['id']
@api_call
def get_all_services(self):
"""
Retrieves all Services belonging to a Directory
:return: List - launchkey.entities.service.Service object containing Service details
"""
return [Service(self._validate_response(service, ServiceValidator)) for service in
self._transport.get("/directory/v3/services", self._subject).data]
@api_call
def get_services(self, service_ids):
"""
Retrieves Services based on an input list of Service IDs
:param service_ids: List of unique Service IDs
:raise: launchkey.exceptions.InvalidParameters - Input parameters were not correct
:return: List - launchkey.entities.service.Service object containing Service details
"""
return [Service(self._validate_response(service, ServiceValidator)) for service in
self._transport.post("/directory/v3/services/list", self._subject,
service_ids=[str(service_id) for service_id in service_ids]).data]
@api_call
def get_service(self, service_id):
"""
Retrieves a Service based on an input Service ID
:param service_id: Unique Service ID
:raise: launchkey.exceptions.InvalidParameters - Input parameters were not correct
:return: launchkey.entities.service.Service object containing Service details
"""
return Service(self._validate_response(
self._transport.post("/directory/v3/services/list", self._subject, service_ids=[str(service_id)]).data[0],
ServiceValidator))
@api_call
def update_service(self, service_id, name=False, description=False, icon=False, callback_url=False, active=None):
"""
Updates a Service's general settings. If an optional parameter is not included it will not be updated.
:param service_id: Unique Service ID
:param name: Unique name that will be displayed in an Auth Request
:param description: Description that can be viewed in the Admin Center or when retrieving the Service.
:param icon: URL to an icon that will be displayed in an Auth Request
:param callback_url: URL that Webhooks will be sent to
:param active: Whether the Service should be able to send Auth Requests
:raise: launchkey.exceptions.InvalidParameters - Input parameters were not correct
:raise: launchkey.exceptions.ServiceNameTaken - Service name already taken
:raise: launchkey.exceptions.ServiceNotFound - No Service could be found matching the input ID
:raise: launchkey.exceptions.Forbidden - The Service you requested either does not exist or you do not have
sufficient permissions.
:return:
"""
kwargs = {"service_id": str(service_id)}
if name is not False:
kwargs['name'] = name
if description is not False:
kwargs['description'] = description
if icon is not False:
kwargs['icon'] = icon
if callback_url is not False:
kwargs['callback_url'] = callback_url
if active is not None:
kwargs['active'] = active
self._transport.patch("/directory/v3/services", self._subject, **kwargs)
@api_call
def add_service_public_key(self, service_id, public_key, expires=None, active=None):
"""
Adds a public key to a Directory Service
:param service_id: Unique Service ID
:param public_key: String RSA public key
:param expires: Optional datetime.datetime stating a time in which the key will no longer be valid
:param active: Optional bool stating whether the key should be considered active and usable.
:raise: launchkey.exceptions.InvalidParameters - Input parameters were not correct
:raise: launchkey.exceptions.InvalidPublicKey - The public key you supplied is not valid.
:raise: launchkey.exceptions.PublicKeyAlreadyInUse - The public key you supplied already exists for the
requested entity. It cannot be added again.
:raise: launchkey.exceptions.Forbidden - The Service you requested either does not exist or you do not have
sufficient permissions.
:return: MD5 fingerprint (key_id) of the public key, IE: e0:2f:a9:5a:76:92:6b:b5:4d:24:67:19:d1:8a:0a:75
"""
kwargs = {"service_id": str(service_id), "public_key": public_key}
if expires is not None:
kwargs['date_expires'] = iso_format(expires)
if active is not None:
kwargs['active'] = active
return self._transport.post("/organization/v3/service/keys", self._subject, **kwargs).data['key_id']
@api_call
def remove_service_public_key(self, service_id, key_id):
"""
Removes a public key from a Directory Service. You may only remove a public key if other public keys exist.
If you wish for a last remaining key to no longer be usable, use update_service_public_key to instead and set it
as inactive.
:param service_id: Unique Service ID
:param key_id: MD5 fingerprint of the public key, IE: fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:4d:24:67:19:d1:8a:0a:75
:raise: launchkey.exceptions.InvalidParameters - Input parameters were not correct
:raise: launchkey.exceptions.PublicKeyDoesNotExist - The key_id you supplied could not be found
:raise: launchkey.exceptions.LastRemainingKey - The last remaining public key cannot be removed
:raise: launchkey.exceptions.Forbidden - The Service you requested either does not exist or you do not have
sufficient permissions.
:return:
"""
self._transport.delete("/directory/v3/service/keys", self._subject, service_id=str(service_id),
key_id=key_id)
@api_call
def get_service_public_keys(self, service_id):
"""
Retrieves a list of Public Keys belonging to a Service
:param service_id: Unique Service ID
:raise: launchkey.exceptions.InvalidParameters - Input parameters were not correct
:raise: launchkey.exceptions.ServiceNotFound - No Service could be found matching the input ID
:raise: launchkey.exceptions.Forbidden - The Service you requested either does not exist or you do not have
sufficient permissions.
:return: List - launchkey.entities.shared.PublicKey
"""
return [PublicKey(self._validate_response(key, PublicKeyValidator)) for key in
self._transport.post("/directory/v3/service/keys/list", self._subject,
service_id=str(service_id)).data]
@api_call
def update_service_public_key(self, service_id, key_id, expires=False, active=None):
"""
Removes a public key from an Directory Service
:param service_id: Unique Service ID
:param key_id: MD5 fingerprint of the public key, IE: e0:2f:a9:5a:76:92:6b:b5:4d:24:67:19:d1:8a:0a:75
:param expires: datetime.datetime stating a time in which the key will no longer be valid
:param active: Bool stating whether the | |
# Repository: cuddebtj/Fantasy-Sidelines
from api import SportRadar
# from api import MySportsFeeds
import pandas as pd
import numpy as np
import time
import gspread
import re
from gspread_pandas import Spread
def schedule_clean(data):
    """Flatten a SportRadar season-schedule payload into a per-team game table.

    Each scheduled game is exploded into two rows -- one per participating
    team -- with that row's own side under ``team.*`` and its opponent under
    ``opp.*``.

    :param data: parsed JSON from ``SportRadar.season_schedule`` -- assumed to
        nest ``weeks`` -> ``games`` -> ``scoring.periods``; TODO confirm shape
    :return: ``pandas.DataFrame`` with one row per team per game
    """
    schedule = pd.DataFrame()
    # Season-level attributes, prefixed so the joins below stay unambiguous.
    season = pd.json_normalize(data).add_prefix("season.")
    season_weeks = pd.json_normalize(season["season.weeks"])
    for wk in range(len(season_weeks.columns)):
        season_weeks_week = pd.json_normalize(season_weeks[wk]).add_prefix("week.")
        season_weeks_week_games = pd.json_normalize(season_weeks_week["week.games"])
        _week = pd.DataFrame()
        for gm in range(len(season_weeks_week_games.columns)):
            try:
                season_weeks_week_games_game = pd.json_normalize(
                    season_weeks_week_games[gm]
                ).add_prefix("game.")
                # One column per quarter/OT period, prefixed periods1..periodsN.
                season_weeks_week_games_game_periods = pd.json_normalize(
                    season_weeks_week_games_game["game.scoring.periods"]
                )
                _period = pd.DataFrame()
                for p in range(len(season_weeks_week_games_game_periods.columns)):
                    season_weeks_week_games_game_periods_period = pd.json_normalize(
                        season_weeks_week_games_game_periods[p]
                    ).add_prefix(f"periods{p+1}.")
                    _period = _period.join(
                        season_weeks_week_games_game_periods_period, how="outer"
                    )
                _game = season_weeks_week_games_game.join(
                    [_period, season, season_weeks_week], how="outer"
                )
            # NOTE(review): bare except silently skips any game that fails to
            # normalize (presumably unplayed games with no scoring data) --
            # consider narrowing to (KeyError, ValueError). TODO confirm.
            except:
                continue
            _week = pd.concat([_game, _week])
        schedule = pd.concat([_week, schedule])
    # Drop the raw nested containers that were already exploded above.
    schedule.drop(
        [
            "game.scoring.periods",
            "season._comment",
            "season.weeks",
            "week.games",
            "week.bye_week",
        ],
        axis=1,
        inplace=True,
    )
    # Duplicate the table so each game yields a "home" row and an "away" row;
    # the substring renames flip home/away columns into team/opp per row.
    away = schedule.copy(deep=True)
    home = schedule.copy(deep=True)
    for col in home.columns:
        home = home.rename(columns={col: str(col).replace("away", "opp")})
    for col in home.columns:
        home = home.rename(columns={col: str(col).replace("home", "team")})
    home["home.away"] = "home"
    for col in away.columns:
        away = away.rename(columns={col: str(col).replace("home", "opp")})
    for col in away.columns:
        away = away.rename(columns={col: str(col).replace("away", "team")})
    away["home.away"] = "away"
    schedule = pd.concat([home, away])
    schedule = schedule.rename(
        columns={
            "game.venue.id": "venue.id",
            "game.venue.name": "venue.name",
            "game.venue.city": "venue.city",
            "game.venue.state": "venue.state",
            "game.venue.country": "venue.country",
            "game.venue.zip": "venue.zip",
            # NOTE(review): "veunue.address" looks like a typo for
            # "venue.address"; left as-is since downstream code may rely on it.
            "game.venue.address": "veunue.address",
            "game.venue.capacity": "venue.capacity",
            "game.venue.surface": "venue.surface",
            "game.venue.roof_type": "venue.roof_type",
            "game.venue.sr_id": "venue.sr_id",
            "game.venue.location.lat": "venue.location.lat",
            "game.venue.location.lng": "venue.location.lng",
            "game.team.id": "team.id",
            "game.team.name": "team.name",
            "game.team.alias": "team.alias",
            "game.team.game_number": "team.game_number",
            "game.team.sr_id": "team.sr_id",
            "game.opp.id": "opp.id",
            "game.opp.name": "opp.name",
            "game.opp.alias": "opp.alias",
            "game.opp.game_number": "opp.game_number",
            "game.opp.sr_id": "opp.sr_id",
        }
    )
    # Cast identifier-like columns to str so spreadsheet export is lossless.
    # NOTE(review): "game.venue.location.lat"/"...lng" were renamed to
    # "venue.location.*" just above, so this selection would raise KeyError if
    # those columns are present -- verify against a real payload.
    schedule[
        [
            "game.reference",
            "game.utc_offset",
            "game.venue.location.lat",
            "game.venue.location.lng",
            "periods1.sequence",
            "periods2.sequence",
            "periods3.sequence",
            "periods4.sequence",
            "periods5.sequence",
            "season.year",
            "week.sequence",
            "week.title",
        ]
    ] = schedule[
        [
            "game.reference",
            "game.utc_offset",
            "game.venue.location.lat",
            "game.venue.location.lng",
            "periods1.sequence",
            "periods2.sequence",
            "periods3.sequence",
            "periods4.sequence",
            "periods5.sequence",
            "season.year",
            "week.sequence",
            "week.title",
        ]
    ].astype(
        str
    )
    return schedule
def player_stats_clean(data):
    """Build a per-player stat table from a SportRadar game-statistics payload.

    Player columns are exploded per stat category, tagged with game/team
    context taken from the team-level summary columns, and finally summed into
    one row per player per game.

    :param data: parsed JSON from ``SportRadar.game_statistics`` -- assumed to
        contain ``statistics.home.*.players`` / ``statistics.away.*.players``
        lists; TODO confirm shape
    :return: ``pandas.DataFrame`` with one row per player per game
    """
    df = pd.json_normalize(data)
    # Partition the flattened payload into team-level vs player-level columns.
    team_cols = []
    players_cols = []
    for col in df.columns:
        if "player" not in col:
            team_cols.append(col)
        elif "player" in col:
            players_cols.append(col)
    team_stats = df[team_cols]
    player_stats = df[players_cols]
    player_stats_df = pd.DataFrame()
    for c in players_cols:
        # Identity fields that keep their names instead of a category prefix.
        keep_columns = ["id", "name", "position", "sr_id", "jersey"]
        if "home" in c:
            df_1 = pd.json_normalize(player_stats[c])
            df_2 = pd.DataFrame()
            # NOTE(review): duplicate of the assignment just above the branch.
            keep_columns = ["id", "name", "position", "sr_id", "jersey"]
            for col in df_1.columns:
                bridge = pd.json_normalize(df_1[col])
                # Prefix stat columns with the category sliced out of the
                # source column name. NOTE(review): c[16:-7] assumes names
                # shaped like "statistics.home.<category>.players" -- confirm.
                bridge.columns = [
                    "{}{}".format("" if z in keep_columns else c[16:-7], z)
                    for z in bridge.columns
                ]
                bridge.rename(
                    columns={
                        "id": "player.id",
                        "name": "player.name",
                        "position": "player.position",
                        "sr_id": "player.sr_id",
                        "jersey": "player.jersey_num",
                    },
                    inplace=True,
                )
                # Attach game/team context from the team-level summary.
                bridge[
                    [
                        "team.alias",
                        "team.name",
                        "team.id",
                        "season",
                        "season.id",
                        "week",
                        "week.id",
                        "opp.alias",
                        "opp.name",
                        "opp.id",
                        "game.id",
                    ]
                ] = team_stats[
                    [
                        "summary.home.alias",
                        "summary.home.name",
                        "summary.home.id",
                        "summary.season.year",
                        "summary.season.id",
                        "summary.week.sequence",
                        "summary.week.id",
                        "summary.away.alias",
                        "summary.away.name",
                        "summary.away.id",
                        "id",
                    ]
                ]
                bridge["home.away"] = "home"
                df_2 = pd.concat([bridge, df_2])
        elif "away" in c:
            df_1 = pd.json_normalize(player_stats[c])
            df_2 = pd.DataFrame()
            for col in df_1.columns:
                bridge = pd.json_normalize(df_1[col])
                bridge.columns = [
                    "{}{}".format("" if z in keep_columns else c[16:-7], z)
                    for z in bridge.columns
                ]
                bridge.rename(
                    columns={
                        "id": "player.id",
                        "name": "player.name",
                        "position": "player.position",
                        "sr_id": "player.sr_id",
                        "jersey": "player.jersey_num",
                    },
                    inplace=True,
                )
                # Same context attachment with home/away roles swapped.
                bridge[
                    [
                        "team.alias",
                        "team.name",
                        "team.id",
                        "season",
                        "season.id",
                        "week",
                        "week.id",
                        "opp.alias",
                        "opp.name",
                        "opp.id",
                        "game.id",
                    ]
                ] = team_stats[
                    [
                        "summary.away.alias",
                        "summary.away.name",
                        "summary.away.id",
                        "summary.season.year",
                        "summary.season.id",
                        "summary.week.sequence",
                        "summary.week.id",
                        "summary.home.alias",
                        "summary.home.name",
                        "summary.home.id",
                        "id",
                    ]
                ]
                bridge["home.away"] = "away"
                df_2 = pd.concat([bridge, df_2])
        # NOTE(review): if a players column matches neither "home" nor "away",
        # df_2 from the previous iteration is re-concatenated (or NameError on
        # the very first pass) -- verify the column naming rules out this case.
        player_stats_df = pd.concat([df_2, player_stats_df])
    groupby_list = [
        "player.id",
        "player.name",
        "player.position",
        "player.sr_id",
        "player.jersey_num",
        "team.alias",
        "team.name",
        "team.id",
        "season",
        "season.id",
        "week",
        "week.id",
        "opp.alias",
        "opp.name",
        "opp.id",
        "game.id",
    ]
    # Collapse the per-category frames so each player ends up on a single row.
    player_stats_df = player_stats_df.groupby(groupby_list).sum().reset_index()
    # Strip generational suffixes so names join cleanly with other sources.
    replace = [" III", " II", " IV", " V", " Jr.", " Sr.", " Sr", " Jr"]
    player_stats_df["player.name"] = player_stats_df["player.name"].str.replace(
        "|".join([re.escape(s) for s in replace]), "", regex=True
    )
    player_stats_df = player_stats_df.drop_duplicates()
    # NOTE(review): the context columns created above are named "season" and
    # "week", not "season.year"/"week.sequence", so this selection looks like
    # it would raise KeyError -- verify against real data.
    player_stats_df[
        ["player.jersey_num", "season.year", "week.sequence"]
    ] = player_stats_df[["player.jersey_num", "season.year", "week.sequence"]].astype(
        str
    )
    return player_stats_df
def team_stats_clean(data):
    """Flatten a game-statistics payload into one team-level row per side.

    The team-level columns are duplicated into a "home" view and an "away"
    view, with each view's own side relabelled ``team.*`` and its opponent
    ``opp.*``; the intermediate ``game.summary.`` / ``game.statistics.``
    prefixes are then stripped and identifier-like columns cast to str.

    :param data: parsed JSON from ``SportRadar.game_statistics``
    :return: ``pandas.DataFrame`` with two rows (home and away) per game
    """
    flat = pd.json_normalize(data)
    # Keep everything that is neither player data nor a comment field.
    selected = [c for c in flat.columns
                if "player" not in c and "_comment" not in c]
    base = flat[selected].add_prefix("game.")

    def _relabel(frame, first, second):
        # Apply the two sequential substring renames a side's view needs.
        frame = frame.rename(
            columns={c: c.replace(first[0], first[1]) for c in frame.columns})
        frame = frame.rename(
            columns={c: c.replace(second[0], second[1]) for c in frame.columns})
        return frame

    home_view = _relabel(base.copy(deep=True), ("away", "opp"), ("home", "team"))
    home_view["home.away"] = "home"
    away_view = _relabel(base.copy(deep=True), ("home", "opp"), ("away", "team"))
    away_view["home.away"] = "away"
    combined = pd.concat([home_view, away_view])

    # Strip the intermediate prefixes introduced by json_normalize.
    stripped = []
    for c in combined.columns:
        if "game.summary." in c:
            stripped.append(c[len("game.summary."):])
        elif "game.statistics." in c:
            stripped.append(c[len("game.statistics."):])
        else:
            stripped.append(c)
    combined.columns = stripped

    # Cast identifier-like columns to str so spreadsheet export is lossless.
    text_cols = [
        "game.reference",
        "game.number",
        "game.utc_offset",
        "game.quarter",
        "season.year",
        "week.sequence",
        "week.title",
        "venue.location.lat",
        "venue.location.lng",
    ]
    combined[text_cols] = combined[text_cols].astype(str)
    return combined
def get_season(
    api_key,
    year,
    access_level="trial",
    version="v7",
    language_code="en",
    format_="json",
    sleep_time=2,
    timeout=5,
):
    """Pull a season's schedule plus per-game player and team statistics.

    :param api_key: SportRadar API key
    :param year: season year to fetch
    :param access_level: API access tier (default "trial")
    :param version: API version string
    :param language_code: response language
    :param format_: response format
    :param sleep_time: seconds the client sleeps between calls (rate limiting)
    :param timeout: per-request timeout in seconds
    :return: tuple ``(schedule, players_stats, teams_stats)`` of DataFrames
    """
    season = SportRadar(
        api_key,
        access_level=access_level,
        version=version,
        language_code=language_code,
        format_=format_,
        sleep_time=sleep_time,
        timeout=timeout,
    )
    schedule_api = season.season_schedule(year)
    schedule = schedule_clean(schedule_api)
    players_stats = pd.DataFrame()
    teams_stats = pd.DataFrame()
    # Iterate game ids directly instead of indexing a list by position
    # (the old loop named its range index "game_id", which was misleading).
    for game_id in schedule["game.id"].unique():
        statistics_api = season.game_statistics(game_id)
        players = player_stats_clean(statistics_api)
        teams = team_stats_clean(statistics_api)
        players_stats = pd.concat([players, players_stats])
        teams_stats = pd.concat([teams, teams_stats])
    return schedule, players_stats, teams_stats
def practice_par(season_start, season_end):
gc = gspread.service_account()
spread = gc.open("PlayerPractice")
weeks = list(range(1, 18))
seasons = list(range(season_start, season_end + 1))
teams_id = [
"Arizona Cardinals",
"Atlanta Falcons",
"Baltimore Ravens",
"Buffalo Bills",
"Carolina Panthers",
"Chicago Bears",
"Cincinnati Bengals",
"Cleveland Browns",
"Dallas Cowboys",
"<NAME>",
"Detroit Lions",
"Green Bay Packers",
"Houston Texans",
"Indianapolis Colts",
"Jacksonville Jaguars",
"Kansas City Chiefs",
"Los Angeles Rams",
"San Diego Chargers",
"Miami Dolphins",
"Minnesota Vikings",
"New England Patriots",
"New Orleans Saints",
"New York Giants",
"New York Jets",
"Oakland Raiders",
"Las Vegas Raiders",
"Philadelphia Eagles",
"Pittsburgh Steelers",
"Los Angeles Chargers",
"San Francisco 49ers",
"Seattle Seahawks",
"Tampa Bay Buccaneers",
"Tennessee Titans",
"W<NAME>",
"Washington Football Team",
]
practice_par_df = pd.DataFrame()
for season in seasons:
for week in weeks:
try:
sh = str(season)
sheet = spread.worksheet(sh)
data = sheet.col_values(week)
idx = [data.index(team) for team in teams_id if team in data]
idx.append(len(data))
idx.sort()
final_df = pd.DataFrame()
for i in range(len(idx[:-1])):
start = idx[i] + 12
end = idx[i + 1]
final_data = {}
final_data["team"] = [
data[start - 12] for i in range(start, end, 6)
]
final_data["player.name"] = [
data[i - 5] for i in range(start, end, 6)
]
final_data["injury"] = [data[i - 4] for i in range(start, end, 6)]
final_data["date1"] = [
data[start - 9] for i in range(start, end, 6)
]
final_data["date1.status"] = [
data[i - 3] for i in range(start, end, 6)
]
final_data["date2"] = [
data[start - 8] for i in range(start, end, 6)
]
final_data["date2.status"] = [
data[i - 2] for i in range(start, end, 6)
]
final_data["date3"] = [
data[start - 7] for i in range(start, end, 6)
]
final_data["date3.status"] = [
data[i - 1] for i in range(start, end, 6)
]
final_data["game.status"] = [data[i] for i in range(start, end, 6)]
df = pd.DataFrame.from_dict(final_data)
final_df = pd.concat([final_df, df])
final_df["season"] = season
final_df["week"] = week
final_df[["date1", "date2", "date3"]] = final_df[
["date1", "date2", "date3"]
].replace(
{
"Tue ": "",
"Wed ": "",
"Thu ": "",
"Mon ": "",
"Sun ": "",
"Fri ": "",
"Sat ": "",
},
regex=True,
)
final_df[["last.date", "game.status", "home.away", "opp"]] = final_df[
"game.status"
].str.split(expand=True)
final_df[["player.name", "player.pos"]] = final_df[
"player.name"
].str.split(", ", 1, expand=True)
final_df = final_df.drop(["last.date"], axis=1)
final_df.sort_values(["team", "player.name"], inplace=True)
final_df["home.away"] = final_df.groupby("team")["home.away"].transform(
lambda x: x.bfill().ffill()
)
final_df["opp"] = final_df.groupby("team")["opp"].transform(
lambda x: x.bfill().ffill()
)
final_df = final_df.replace({"--": np.nan})
final_df["game.status"] = final_df["game.status"].replace(
{None: "Full"}
)
final_df["home.away"] = final_df["home.away"].replace(
{"vs": "home", "@": "away"}
)
final_df["team"] = final_df["team"].replace(
{
"Arizona Cardinals": "ARI",
"Atlanta Falcons": "ATL",
"Baltimore Ravens": "BAL",
"Buffalo Bills": "BUF",
"Carolina Panthers": "CAR",
"Chicago Bears": "CHI",
"Cincinnati Bengals": "CIN",
"Cleveland Browns": "CLE",
"Dallas Cowboys": "DAL",
"<NAME>": "DEN",
"Detroit Lions": "DET",
"Green Bay Packers": "GB",
"Houston Texans": "HOU",
"Indianapolis Colts": "IND",
"Jacksonville Jaguars": "JAC",
"Kansas City Chiefs": "KC",
"Las Vegas Raiders": "LV",
"Los Angeles Chargers": "LAC",
"Los Angeles Rams": "LA",
"Miami Dolphins": "MIA",
"Minnesota Vikings": "MIN",
"New England Patriots": "NE",
"New Orleans Saints": "NO",
"New York Giants": "NYG",
"New York Jets": "NYJ",
"Oakland Raiders": "OAK",
"Philadelphia Eagles": "PHI",
"Pittsburgh Steelers": "PIT",
"San Diego Chargers": "SD",
"San Francisco 49ers": "SF",
"Seattle Seahawks": "SEA",
"Tampa Bay Buccaneers": "TB",
"Tennessee Titans": "TEN",
"Washington Football Team": "WAS",
"Washington Redskins": "WAS",
}
)
final_df["opp"] = final_df["opp"].replace(
{
"Ari": "ARI",
"Atl": "ATL",
"Bal": "BAL",
"Buf": "BUF",
"Car": "CAR",
"Chi": "CHI",
"Cin": "CIN",
"Cle": "CLE",
"Dal": "DAL",
"Den": "DEN",
"Det": "DET",
"Hou": "HOU",
"Ind": "IND",
"Jax": "JAC",
"Mia": "MIA",
"Min": "MIN",
| |
i_k)}
= \Psi_{i_1} \Psi_{i_2} \cdots \Psi_{i_k}`.
The `\Psi`-basis is a basis only when the base ring is a
`\QQ`-algebra (although the `\Psi^I` can be defined over any base
ring). The elements of the `\Psi`-basis are known as the
"power-sum non-commutative symmetric functions of the first kind".
The generators `\Psi_n` correspond to the Dynkin
(quasi-)idempotents in the descent algebras of the symmetric
groups (see [NCSF1]_, 5.2 for details).
Another (equivalent) definition of `\Psi_n` is
.. MATH::
\Psi_n = \sum_{i=0}^{n-1} (-1)^i R_{1^i, n-i},
where `R` denotes the ribbon basis of `NCSF`, and where `1^i`
stands for `i` repetitions of the integer `1`.
EXAMPLES::
sage: NCSF = NonCommutativeSymmetricFunctions(QQ)
sage: Psi = NCSF.Psi(); Psi
Non-Commutative Symmetric Functions over the Rational Field in the Psi basis
sage: Psi.an_element()
2*Psi[] + 2*Psi[1] + 3*Psi[1, 1]
Checking the equivalent definition of `\Psi_n`::
sage: def test_psi(n):
....: NCSF = NonCommutativeSymmetricFunctions(ZZ)
....: R = NCSF.R()
....: Psi = NCSF.Psi()
....: a = R.sum([(-1) ** i * R[[1]*i + [n-i]]
....: for i in range(n)])
....: return Psi(a) == Psi[n]
sage: test_psi(2)
True
sage: test_psi(3)
True
sage: test_psi(4)
True
"""
        def __init__(self, NCSF):
            r"""
            Initialize ``self`` as the Psi basis of ``NCSF``.

            INPUT:

            - ``NCSF`` -- the ambient algebra of non-commutative symmetric
              functions this basis realizes

            TESTS:
            We include a sanity test to verify the conversion to
            and from the complete basis works the way it should::
                sage: S = NonCommutativeSymmetricFunctions(QQ).complete()
                sage: Psi = NonCommutativeSymmetricFunctions(QQ).Psi(); Psi
                Non-Commutative Symmetric Functions over the Rational Field in the Psi basis
                sage: all(S(Psi(S[comp])) == S[comp] for comp in Compositions(5))
                True
                sage: all(Psi(S(Psi[comp])) == Psi[comp] for comp in Compositions(5))
                True
            """
            # Index the free module by all compositions and register it in the
            # category of multiplicative bases on primitive elements, since the
            # generators Psi_n are primitive.
            CombinatorialFreeModule.__init__(self, NCSF.base_ring(), Compositions(),
                                             prefix='Psi', bracket=False,
                                             category=NCSF.MultiplicativeBasesOnPrimitiveElements())
# TODO: should those be defined using algebra morphism?
def _from_complete_on_generators(self, n):
r"""
Expand a complete generator of non-commutative symmetric
functions in the Psi basis.
INPUT:
- ``n`` -- a positive integer
OUTPUT:
- The expansion of the complete generator indexed by ``n`` into the
Psi basis.
TESTS::
sage: S = NonCommutativeSymmetricFunctions(QQ).complete()
sage: Psi = NonCommutativeSymmetricFunctions(QQ).Psi()
sage: Psi._from_complete_on_generators(1)
Psi[1]
sage: Psi._from_complete_on_generators(2)
1/2*Psi[1, 1] + 1/2*Psi[2]
sage: Psi._from_complete_on_generators(3)
1/6*Psi[1, 1, 1] + 1/3*Psi[1, 2] + 1/6*Psi[2, 1] + 1/3*Psi[3]
"""
# Equation (58) of NCSF I article
one = self.base_ring().one()
I = self._basis_keys([n])
# TODO: I being trivial, there is no refinement going on here, so
# one can probably be a bit more explicit / fast
return self.sum_of_terms( ( (J, one/coeff_pi(J,I)) for J in Compositions(n) ),
distinct=True )
def _from_complete_on_basis(self, I):
r"""
Expand a complete basis element of non-commutative symmetric functions
in the Psi basis.
INPUT:
- ``I`` -- a composition
OUTPUT:
- The expansion of the complete function indexed by ``I`` in the
Psi basis.
TESTS::
sage: S = NonCommutativeSymmetricFunctions(QQ).complete()
sage: Psi = NonCommutativeSymmetricFunctions(QQ).Psi()
sage: Psi._from_complete_on_basis(Composition([1]))
Psi[1]
sage: Psi._from_complete_on_basis(Composition([2]))
1/2*Psi[1, 1] + 1/2*Psi[2]
sage: Psi._from_complete_on_basis(Composition([3]))
1/6*Psi[1, 1, 1] + 1/3*Psi[1, 2] + 1/6*Psi[2, 1] + 1/3*Psi[3]
sage: Psi._from_complete_on_basis(Composition([2,1]))
1/2*Psi[1, 1, 1] + 1/2*Psi[2, 1]
sage: Psi._from_complete_on_basis(Composition([1,2]))
1/2*Psi[1, 1, 1] + 1/2*Psi[1, 2]
sage: Psi._from_complete_on_basis(Composition([1,1,1]))
Psi[1, 1, 1]
"""
# TODO: make this comment into a reference in the doctest (same thing elsewhere)
# Proposition 4.5 of NCSF I article
one = self.base_ring().one()
return self.sum_of_terms( ( (J, one/coeff_pi(J,I)) for J in I.finer() ),
distinct=True )
def _to_complete_on_basis(self, I):
r"""
Expand a Psi basis element of non-commutative symmetric functions
in the complete basis.
INPUT:
- ``I`` -- a composition
OUTPUT:
- The expansion of the Psi function indexed by ``I`` in the
complete basis.
TESTS::
sage: S = NonCommutativeSymmetricFunctions(QQ).complete()
sage: Psi = NonCommutativeSymmetricFunctions(QQ).Psi()
sage: Psi._to_complete_on_basis(Composition([1]))
S[1]
sage: Psi._to_complete_on_basis(Composition([2]))
-S[1, 1] + 2*S[2]
sage: Psi._to_complete_on_basis(Composition([1,1]))
S[1, 1]
sage: Psi._to_complete_on_basis(Composition([3]))
S[1, 1, 1] - 2*S[1, 2] - S[2, 1] + 3*S[3]
sage: Psi._to_complete_on_basis(Composition([2,1]))
-S[1, 1, 1] + 2*S[2, 1]
sage: Psi._to_complete_on_basis(Composition([1,2]))
-S[1, 1, 1] + 2*S[1, 2]
sage: Psi._to_complete_on_basis(Composition([1,1,1]))
S[1, 1, 1]
"""
# Proposition 4.5 of NCSF I article
minus_one = -self.base_ring().one()
complete = self.realization_of().complete()
return complete.sum_of_terms( ( (J, minus_one**(len(J)+len(I))*coeff_lp(J,I))
for J in I.finer() ),
distinct=True )
def internal_product_on_basis_by_bracketing(self, I, J):
r"""
The internal product of two elements of the Psi basis.
See :meth:`~sage.combinat.ncsf_qsym.generic_basis_code.GradedModulesWithInternalProduct.ElementMethods.internal_product`
for a thorough documentation of this operation.
This is an implementation using [NCSF2]_ Lemma 3.10.
It is fast when the length of `I` is small, but can get
very slow otherwise. Therefore it is not being used by
default for internally multiplying Psi functions.
INPUT:
- ``I``, ``J`` -- compositions
OUTPUT:
- The internal product of the elements of the Psi basis of
`NSym` indexed by ``I`` and ``J``, expressed in the Psi
basis.
AUTHORS:
- <NAME>, 29 Mar 2014
EXAMPLES::
sage: N = NonCommutativeSymmetricFunctions(QQ)
sage: Psi = N.Psi()
sage: Psi.internal_product_on_basis_by_bracketing([2,2],[1,2,1])
0
sage: Psi.internal_product_on_basis_by_bracketing([1,2,1],[2,1,1])
4*Psi[1, 2, 1]
sage: Psi.internal_product_on_basis_by_bracketing([2,1,1],[1,2,1])
4*Psi[2, 1, 1]
sage: Psi.internal_product_on_basis_by_bracketing([1,2,1], [1,1,1,1])
0
sage: Psi.internal_product_on_basis_by_bracketing([3,1], [1,2,1])
-Psi[1, 2, 1] + Psi[2, 1, 1]
sage: Psi.internal_product_on_basis_by_bracketing([1,2,1], [3,1])
0
sage: Psi.internal_product_on_basis_by_bracketing([2,2],[1,2])
0
sage: Psi.internal_product_on_basis_by_bracketing([4], [1,2,1])
-Psi[1, 1, 2] + 2*Psi[1, 2, 1] - Psi[2, 1, 1]
TESTS:
The internal product computed by this method is identical with
the one obtained by coercion to the complete basis::
sage: S = N.S()
sage: def psi_int_test(n):
....: for I in Compositions(n):
....: for J in Compositions(n):
....: a = S(Psi.internal_product_on_basis_by_bracketing(I, J))
....: b = S(Psi[I]).internal_product(S(Psi[J]))
....: if a != b:
....: return False
....: return True
sage: all( psi_int_test(i) for i in range(4) )
True
sage: psi_int_test(4) # long time
True
sage: psi_int_test(5) # long time
True
"""
# The algorithm used here is described in
# :meth:`generic_basis_code.GradedModulesWithInternalProduct.ElementMethods.internal_product`.
if sum(I) != sum(J):
return self.zero()
p = len(I)
q = len(J)
if p > q:
return self.zero()
if p == q:
Is = sorted(I, reverse=True)
Js = sorted(J, reverse=True)
if Is != Js:
return 0
return Partition(Is).centralizer_size() * self[I]
# If we're still here, we must have p < q.
def Gamma(K):
r"""
Compute `\Gamma_K` for a nonempty composition `K` (which
can be encoded as a list). See the doc of
:meth:`~sage.combinat.ncsf_qsym.generic_basis_code.GradedModulesWithInternalProduct.ElementMethods.internal_product`
for a definition of this.
"""
k1 = K[0]
res = k1 * self[k1]
for k in K[1:]:
Psik = self[k]
res = res * Psik - Psik * res
return res
# Special case when I = [n], there is exactly one ordered set
# partition and letting this case through would mean another
# case check during the backtracking algorithm
if p == 1:
return Gamma(J)
# We now need to sum over all ordered set partitions
# `(K_1, K_2, \ldots, K_p)` of `\{ 1, 2, \ldots, q \}`
# into `p` parts such that
# each `0 \leq k < p` satisfies `|J_{K_k}| = I_k`.
# To do so, we will encode such partitions as lists
# of subsets (which themselves are encoded as lists,
# in increasing order, with every entry decremented by
# 1 so as to simplify indexing).
# We create a variable K which traverses
# (among other things) these ordered set partitions in
# lexicographic order (on lists of lists of integers,
# NOT flattened). It follows a backtracking
# algorithm; when not backtracking, the last entry
# of its last part will be "exploring" different
# possible values.
K = [[-1]]
cur_sum = 0
# base will be the set of elements that are currently
# not in K (again, all decremented by 1). Here, the
# last entry of the last part of K does not count as
# being in K when we are between ordered set partitions.
base = set(range(q))
result = self.zero()
while True:
# If K is too long or there is nothing more to add:
# backtrack by removing the last part of K.
if len(K) > p or not base:
# Remove the last part from K.
base.union(K.pop()[:-1])
# We don't need checks here since p > 0 and all parts
# have size > 0 or we couldn't have added everything to the first
part = K[-1]
base.add(part[-1])
# Similarly, we can just continue on
else:
part = K[-1]
# part is | |
for data object(s) to close or job(s) to finish',
description='Polls the state of specified data object(s) or job(s) until they are all in the desired state. Waits until the "closed" state for a data object, and for any terminal state for a job ("terminated", "failed", or "done"). Exits with a non-zero code if a job reaches a terminal state that is not "done". Can also provide a local file containing a list of data object(s) or job(s), one per line; the file will be read if "--from-file" argument is added.',
prog='dx wait',
parents=[env_args])
path_action = parser_wait.add_argument('path', help='Path to a data object, job ID, or file with IDs to wait for', nargs='+')
path_action.completer = DXPathCompleter()
parser_wait.add_argument('--from-file', help='Read the list of objects to wait for from the file provided in path', action='store_true')
parser_wait.set_defaults(func=wait)
register_parser(parser_wait, categories=('data', 'metadata', 'exec'))
#####################################
# get
#####################################
parser_get = subparsers.add_parser('get', help='Download records, apps, applets, workflows, and files',
description='Download the contents of some types of data (records, apps, applets, workflows, and files). Downloading an app, applet or a workflow will attempt to reconstruct a source directory that can be used to rebuild it with "dx build". Use "-o -" to direct the output to stdout.',
prog='dx get',
parents=[env_args])
parser_get.add_argument('path', help='Data object ID or name to access').completer = DXPathCompleter(classes=['file', 'record', 'applet', 'app', 'workflow'])
parser_get.add_argument('-o', '--output', help='local file path where the data is to be saved ("-" indicates stdout output for objects of class file and record). If not supplied, the object\'s name on the platform will be used, along with any applicable extensions. For app(let) and workflow objects, if OUTPUT does not exist, the object\'s source directory will be created there; if OUTPUT is an existing directory, a new directory with the object\'s name will be created inside it.')
parser_get.add_argument('--no-ext', help='If -o is not provided, do not add an extension to the filename', action='store_true')
parser_get.add_argument('--omit-resources', help='When downloading an app(let), omit fetching the resources associated with the app(let).', action='store_true')
parser_get.add_argument('-f', '--overwrite', help='Overwrite the local file if necessary', action='store_true')
parser_get.set_defaults(func=get)
register_parser(parser_get, categories='data')
#####################################
# find
#####################################
parser_find = subparsers.add_parser('find', help='Search functionality over various DNAnexus entities',
description='Search functionality over various DNAnexus entities.',
formatter_class=argparse.RawTextHelpFormatter,
prog='dx find')
subparsers_find = parser_find.add_subparsers(parser_class=DXArgumentParser)
subparsers_find.metavar = 'category'
register_parser(parser_find, categories=())
parser_find_apps = subparsers_find.add_parser(
'apps',
help=fill('List available apps'),
description=fill('Finds apps subject to the given search parameters. Use --category to restrict by a category; '
'common categories are available as tab completions and can be listed with --category-help.'),
parents=[stdout_args, json_arg, delim_arg, env_args],
prog='dx find apps'
)
parser_find_apps.add_argument('--name', help='Name of the app')
parser_find_apps.add_argument('--category', help='Category of the app').completer = ListCompleter(APP_CATEGORIES)
parser_find_apps.add_argument('--category-help',
help='Print a list of common app categories',
nargs=0,
action=PrintCategoryHelp)
parser_find_apps.add_argument('-a', '--all', help='Return all versions of each app', action='store_true')
parser_find_apps.add_argument('--unpublished', help='Return only unpublished apps (if omitted, returns only published apps)', action='store_true')
parser_find_apps.add_argument('--installed', help='Return only installed apps', action='store_true')
parser_find_apps.add_argument('--billed-to', help='User or organization responsible for the app')
parser_find_apps.add_argument('--creator', help='Creator of the app version')
parser_find_apps.add_argument('--developer', help='Developer of the app')
# Time filters accept a date (e.g. 2012-01-01), an absolute integer
# timestamp, or a negative/suffixed relative offset (s, m, h, d, w, M, y).
parser_find_apps.add_argument('--created-after', help='Date (e.g. 2012-01-01) or integer timestamp after which the app version was created (negative number means ms in the past, or use suffix s, m, h, d, w, M, y)')
parser_find_apps.add_argument('--created-before', help='Date (e.g. 2012-01-01) or integer timestamp before which the app version was created (negative number means ms in the past, or use suffix s, m, h, d, w, M, y)')
parser_find_apps.add_argument('--mod-after', help='Date (e.g. 2012-01-01) or integer timestamp after which the app was last modified (negative number means ms in the past, or use suffix s, m, h, d, w, M, y)')
parser_find_apps.add_argument('--mod-before', help='Date (e.g. 2012-01-01) or integer timestamp before which the app was last modified (negative number means ms in the past, or use suffix s, m, h, d, w, M, y)')
parser_find_apps.set_defaults(func=find_apps)
register_parser(parser_find_apps, subparsers_action=subparsers_find, categories='exec')
# `dx find globalworkflows`: search global workflows (published by default,
# --unpublished to invert); mirrors the `dx find apps` option set.
parser_find_globalworkflows = subparsers_find.add_parser(
    'globalworkflows',
    help=fill('List available global workflows'),
    description=fill('Finds global workflows subject to the given search parameters. Use --category to restrict by a category; '
                     'common categories are available as tab completions and can be listed with --category-help.'),
    parents=[stdout_args, json_arg, delim_arg, env_args],
    prog='dx find globalworkflows'
)
parser_find_globalworkflows.add_argument('--name', help='Name of the workflow')
parser_find_globalworkflows.add_argument('--category', help='Category of the workflow').completer = ListCompleter(APP_CATEGORIES)
# nargs=0: the action prints the category list and exits, consuming no value
parser_find_globalworkflows.add_argument('--category-help',
                                         help='Print a list of common global workflow categories',
                                         nargs=0,
                                         action=PrintCategoryHelp)
parser_find_globalworkflows.add_argument('-a', '--all', help='Return all versions of each workflow', action='store_true')
parser_find_globalworkflows.add_argument('--unpublished', help='Return only unpublished workflows (if omitted, returns only published workflows)', action='store_true')
parser_find_globalworkflows.add_argument('--billed-to', help='User or organization responsible for the workflow')
parser_find_globalworkflows.add_argument('--creator', help='Creator of the workflow version')
parser_find_globalworkflows.add_argument('--developer', help='Developer of the workflow')
parser_find_globalworkflows.add_argument('--created-after', help='Date (e.g. 2012-01-01) or integer timestamp after which the workflow version was created (negative number means ms in the past, or use suffix s, m, h, d, w, M, y)')
parser_find_globalworkflows.add_argument('--created-before', help='Date (e.g. 2012-01-01) or integer timestamp before which the workflow version was created (negative number means ms in the past, or use suffix s, m, h, d, w, M, y)')
parser_find_globalworkflows.add_argument('--mod-after', help='Date (e.g. 2012-01-01) or integer timestamp after which the workflow was last modified (negative number means ms in the past, or use suffix s, m, h, d, w, M, y)')
parser_find_globalworkflows.add_argument('--mod-before', help='Date (e.g. 2012-01-01) or integer timestamp before which the workflow was last modified (negative number means ms in the past, or use suffix s, m, h, d, w, M, y)')
parser_find_globalworkflows.set_defaults(func=find_global_workflows)
register_parser(parser_find_globalworkflows, subparsers_action=subparsers_find, categories='exec')
# `dx find jobs` / `dx find analyses` / `dx find executions` share the same
# parent parsers and add_find_executions_search_gp() options; they differ
# only in the classname passed through to find_executions.
parser_find_jobs = subparsers_find.add_parser(
    'jobs',
    help=fill('List jobs in the current project'),
    description=fill('Finds jobs subject to the given search parameters. By default, output is formatted to show the '
                     'last several job trees that you\'ve run in the current project.'),
    parents=[find_executions_args, stdout_args, json_arg, no_color_arg, delim_arg, env_args,
             find_by_properties_and_tags_args],
    formatter_class=argparse.RawTextHelpFormatter,
    conflict_handler='resolve',
    prog='dx find jobs'
)
add_find_executions_search_gp(parser_find_jobs)
parser_find_jobs.set_defaults(func=find_executions, classname='job')
parser_find_jobs.completer = DXPathCompleter(expected='project')
register_parser(parser_find_jobs, subparsers_action=subparsers_find, categories='exec')
parser_find_analyses = subparsers_find.add_parser(
    'analyses',
    help=fill('List analyses in the current project'),
    description=fill('Finds analyses subject to the given search parameters. By default, output is formatted to show '
                     'the last several job trees that you\'ve run in the current project.'),
    parents=[find_executions_args, stdout_args, json_arg, no_color_arg, delim_arg, env_args,
             find_by_properties_and_tags_args],
    formatter_class=argparse.RawTextHelpFormatter,
    conflict_handler='resolve',
    prog='dx find analyses'
)
add_find_executions_search_gp(parser_find_analyses)
parser_find_analyses.set_defaults(func=find_executions, classname='analysis')
parser_find_analyses.completer = DXPathCompleter(expected='project')
register_parser(parser_find_analyses, subparsers_action=subparsers_find, categories='exec')
# classname=None means "both jobs and analyses"
parser_find_executions = subparsers_find.add_parser(
    'executions',
    help=fill('List executions (jobs and analyses) in the current project'),
    description=fill('Finds executions (jobs and analyses) subject to the given search parameters. By default, output '
                     'is formatted to show the last several job trees that you\'ve run in the current project.'),
    parents=[find_executions_args, stdout_args, json_arg, no_color_arg, delim_arg, env_args,
             find_by_properties_and_tags_args],
    formatter_class=argparse.RawTextHelpFormatter,
    conflict_handler='resolve',
    prog='dx find executions'
)
add_find_executions_search_gp(parser_find_executions)
parser_find_executions.set_defaults(func=find_executions, classname=None)
parser_find_executions.completer = DXPathCompleter(expected='project')
register_parser(parser_find_executions, subparsers_action=subparsers_find, categories='exec')
# `dx find data`: data-object search, restricted to the current project
# unless --all-projects is given (which overrides --path/--norecurse).
parser_find_data = subparsers_find.add_parser(
    'data',
    help=fill('List data objects in the current project'),
    description=fill('Finds data objects subject to the given search parameters. By default, restricts the search to '
                     'the current project if set. To search over all projects (excluding public projects), use '
                     '--all-projects (overrides --path and --norecurse).'),
    parents=[stdout_args, json_arg, no_color_arg, delim_arg, env_args, find_by_properties_and_tags_args],
    prog='dx find data'
)
# NOTE: 'gtable' is still accepted but deliberately omitted from the metavar
parser_find_data.add_argument('--class', dest='classname', choices=['record', 'file', 'gtable', 'applet', 'workflow'], help='Data object class', metavar='{record,file,applet,workflow}')
parser_find_data.add_argument('--state', choices=['open', 'closing', 'closed', 'any'], help='State of the object')
parser_find_data.add_argument('--visibility', choices=['hidden', 'visible', 'either'], default='visible', help='Whether the object is hidden or not')
parser_find_data.add_argument('--name', help='Name of the object')
parser_find_data.add_argument('--type', help='Type of the data object')
parser_find_data.add_argument('--link', help='Object ID that the data object links to')
parser_find_data.add_argument('--all-projects', '--allprojects', help='Extend search to all projects (excluding public projects)', action='store_true')
# --project/--folder are hidden legacy spellings; --path is the public form
parser_find_data.add_argument('--project', help=argparse.SUPPRESS)
parser_find_data.add_argument('--folder', help=argparse.SUPPRESS).completer = DXPathCompleter(expected='folder')
parser_find_data.add_argument('--path', help='Project and/or folder in which to restrict the results',
                              metavar='PROJECT:FOLDER').completer = DXPathCompleter(expected='folder')
parser_find_data.add_argument('--norecurse', dest='recurse', help='Do not recurse into subfolders', action='store_false')
parser_find_data.add_argument('--mod-after', help='Date (e.g. 2012-01-01) or integer timestamp after which the object was last modified (negative number means ms in the past, or use suffix s, m, h, d, w, M, y)')
parser_find_data.add_argument('--mod-before', help='Date (e.g. 2012-01-01) or integer timestamp before which the object was last modified (negative number means ms in the past, or use suffix s, m, h, d, w, M, y)')
parser_find_data.add_argument('--created-after', help='Date (e.g. 2012-01-01) or integer timestamp after which the object was created (negative number means ms in the past, or use suffix s, m, h, d, w, M, y)')
parser_find_data.add_argument('--created-before', help='Date (e.g. 2012-01-01) or integer timestamp before which the object was created (negative number means ms in the past, or use suffix s, m, h, d, w, M, y)')
parser_find_data.add_argument('--region', help='Restrict the search to the provided region')
parser_find_data.set_defaults(func=find_data)
register_parser(parser_find_data, subparsers_action=subparsers_find, categories=('data', 'metadata'))
# `dx find projects`
parser_find_projects = subparsers_find.add_parser(
    'projects',
    help=fill('List projects'),
    description=fill('Finds projects subject to the given search parameters. Use the --public flag to list all public '
                     'projects.'),
    parents=[stdout_args, json_arg, delim_arg, env_args, find_by_properties_and_tags_args, contains_phi],
    prog='dx find projects'
)
parser_find_projects.add_argument('--name', help='Name of the project')
parser_find_projects.add_argument('--level', choices=['VIEW', 'UPLOAD', 'CONTRIBUTE', 'ADMINISTER'],
                                  help='Minimum level of permissions expected')
parser_find_projects.add_argument('--public',
                                  help='Include ONLY public projects (will automatically set --level to VIEW)',
                                  action='store_true')
parser_find_projects.add_argument('--created-after',
help='Date (e.g. 2012-01-01) or integer timestamp after which the project was ' +
| |
"""
Script to help you train the model locally.
Otherwise, I would recommend you to train the model on the Kaggle notebook.
https://www.kaggle.com/blaisewang/topic-label-generation
"""
import csv
import math
import time
import gensim
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import tensorflow as tf
from nltk.translate import bleu_score, gleu_score, nist_score
from rouge import Rouge
from sklearn.model_selection import train_test_split
from models.pre_bi_gru_attn import Encoder, Decoder
# enable eager execution for TensorFlow < 2.0
tf.compat.v1.enable_eager_execution()
# data_15 means threshold value is 1.5
path_to_file = "./input/data_15.csv"
# word embedding dimensionality (re-assigned below when word2vec is loaded)
embedding_size = 300
# True for applying the early stopping
early_stopping = True
# True for applying the attention mechanism
decoder_attention = Decoder.attention_mechanism
# True for applying the pre-trained word2vec
pre_trained_word2vec = Encoder.pre_trained_word2vec
# load the word2vec model once; the locals() check avoids re-loading the
# large binary when the script is re-run in an interactive session
if pre_trained_word2vec and "model" not in locals():
    model = gensim.models.KeyedVectors.load_word2vec_format("./word2vec/GoogleNews-vectors-negative300.bin",
                                                            binary=True)
    vocab = model.vocab
    # GoogleNews vectors are 300-dimensional
    embedding_size = 300
# index -> special-token text for the four reserved vocabulary slots
token_index = {0: "<pad>", 1: "<start>", 2: "<end>", 3: "<unk>"}
# fixed word vectors for the special tokens
token_vector = {"<start>": tf.ones(embedding_size),
                "<end>": tf.negative(tf.ones(embedding_size)),
                "<unk>": tf.zeros(embedding_size),
                "<pad>": tf.tile([0.5], [embedding_size])}
# function for pre-processing the sentences
def preprocess_sentence(sent):
    """Wrap *sent* in <start>/<end> markers, mapping OOV words to <unk>.

    Hyphens become underscores because the Google News word2vec vocabulary
    joins multi-word terms with underscores.
    """
    sent = sent.replace("-", "_")
    if not pre_trained_word2vec:
        return "<start> " + sent + " <end>"
    words = [word if word in vocab else "<unk>" for word in sent.split()]
    return "<start> " + " ".join(words) + " <end>"
# function for loading and split the dataset
def create_dataset(path):
    """Load the CSV at *path* and return parallel (topics, labels) lists."""
    topics = []
    labels = []
    with open(path, "r") as csv_data:
        reader = csv.reader(csv_data)
        next(reader, None)  # skip the header row
        for row in reader:
            topic_str = preprocess_sentence(row[0])
            label_str = preprocess_sentence(row[1])
            # Drop samples whose label consists entirely of special tokens,
            # i.e. every content word was out-of-vocabulary.
            if pre_trained_word2vec and all(
                    word in token_vector for word in label_str.split()):
                continue
            topics.append(topic_str)
            labels.append(label_str)
    return topics, labels
# max sentence length
def max_length(vectors):
    """Return the length of the longest sequence in *vectors*."""
    return max(map(len, vectors))
# tokenize the sentence
def tokenize(lang):
    """Fit a Keras tokenizer on *lang*; return (padded index matrix, tokenizer)."""
    lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters="")
    lang_tokenizer.fit_on_texts(lang)
    sequences = lang_tokenizer.texts_to_sequences(lang)
    padded = tf.keras.preprocessing.sequence.pad_sequences(sequences, padding="post")
    return padded, lang_tokenizer
# {topic: [label_1, label_2, ...]}
def create_reference_dict(inputs, targets):
    """Group labels by topic: {topic: [label_1, label_2, ...]}."""
    ref_dict = {}
    for topic, label in zip(inputs, targets):
        ref_dict.setdefault(topic, []).append(label)
    return ref_dict
# convert word index to vector
def index2vec(index, tokenizer):
    """Map a vocabulary *index* to its embedding vector.

    Indices 0-3 are the special tokens (<pad>/<start>/<end>/<unk>); any
    other index is resolved via the tokenizer's index->word table and the
    pre-trained word2vec model.
    """
    if index > 3:
        return model.word_vec(tokenizer.index_word[index])
    return token_vector[token_index[index]]
# convert a list of indices to vectors
def indices2vec(indices, tokenizer):
    """Convert a sequence of word indices to a list of embedding vectors."""
    return [index2vec(int(index), tokenizer) for index in indices]
# input sequence to input & target vectors
def input2vec(data):
    """Expand topics into (input, target) index arrays, padded to max length.

    Each topic in *data* is paired with every one of its reference labels,
    so a topic with N labels yields N training pairs.
    """
    pairs = [(topic, label)
             for topic in data
             for label in reference_dict[topic]]
    inputs = [topic for topic, _ in pairs]
    targets = [label for _, label in pairs]
    inputs = input_tokenizer.texts_to_sequences(inputs)
    targets = target_tokenizer.texts_to_sequences(targets)
    inputs = tf.keras.preprocessing.sequence.pad_sequences(inputs, maxlen=max_length_inp, padding="post")
    targets = tf.keras.preprocessing.sequence.pad_sequences(targets, maxlen=max_length_target, padding="post")
    return inputs, targets
# creating cleaned input, output pairs
input_lang, target_lang = create_dataset(path_to_file)
reference_dict = create_reference_dict(input_lang, target_lang)
input_vectors, input_tokenizer = tokenize(input_lang)
target_vectors, target_tokenizer = tokenize(target_lang)
# calculate max_length of the vectors
max_length_inp, max_length_target = max_length(input_vectors), max_length(target_vectors)
# creating training, val, test sets using an 70-20-10 split
# (30% held out first, then 33% of that -> roughly 20% val / 10% test)
input_train, input_test = train_test_split(list(reference_dict.keys()), test_size=0.3)
input_val, input_test = train_test_split(input_test, test_size=0.33)
# report how much of the test vocabulary is never seen during training
train_vocab = set([word for sentence in input_train for word in sentence.split()])
test_vocab = set([word for sentence in input_test for word in sentence.split()])
intersect_vocab = train_vocab.intersection(test_vocab)
print("%.2f%% of words in the test set are unknown" % ((1 - len(intersect_vocab) / len(test_vocab)) * 100))
input_train, target_train = input2vec(input_train)
input_val, target_val = input2vec(input_val)
input_test, target_test = input2vec(input_test)
BATCH_SIZE = 64
buffer_size = len(input_train)
# +1 accounts for the reserved padding index 0
vocab_inp_size = len(input_tokenizer.word_index) + 1
vocab_tar_size = len(target_tokenizer.word_index) + 1
train_steps_per_epoch = math.ceil(len(input_train) / BATCH_SIZE)
val_steps_per_epoch = math.ceil(len(input_val) / BATCH_SIZE)
test_steps_per_epoch = math.ceil(len(input_test) / BATCH_SIZE)
# train dataset
train_dataset = tf.data.Dataset.from_tensor_slices((input_train, target_train))
train_dataset = train_dataset.shuffle(buffer_size).batch(BATCH_SIZE)
# validation dataset
val_dataset = tf.data.Dataset.from_tensor_slices((input_val, target_val))
val_dataset = val_dataset.batch(BATCH_SIZE)
# test dataset
test_dataset = tf.data.Dataset.from_tensor_slices((input_test, target_test))
test_dataset = test_dataset.batch(BATCH_SIZE)
# RNN units dimension
RNN_DIMENSION = 1024
# initialise encoder decoder with pre_trained_word2vec flag
# (with pre-trained vectors the encoder needs no embedding layer)
if pre_trained_word2vec:
    encoder, decoder = Encoder(RNN_DIMENSION), Decoder(vocab_tar_size, RNN_DIMENSION)
else:
    encoder = Encoder(vocab_inp_size, embedding_size, RNN_DIMENSION)
    decoder = Decoder(vocab_tar_size, embedding_size, RNN_DIMENSION)
# per-epoch history, consumed by the plotting code at the end of training
train_l = []
train_acc = []
val_l = []
val_acc = []
bleu_scores = []
gleu_scores = []
nist_scores = []
rouge_1l_dicts = []
# custom learning rate scheduler
class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Transformer-style warm-up schedule.

    The rate grows linearly for `warm_up_steps` steps, then decays
    proportionally to 1/sqrt(step).
    """

    def __init__(self, warm_up_steps=2000):
        super(CustomSchedule, self).__init__()
        self.warm_up_steps = warm_up_steps
        # scale factor tied to the warm-up length; with the default of
        # 2000 steps this yields d_model = 2 * 128 = 256
        self.d_model = int(4000 / warm_up_steps) * 128
        self.d_model = tf.cast(self.d_model, tf.float32)

    def get_config(self):
        # serialization is not needed for this training script
        pass

    def __call__(self, step):
        arg1 = tf.math.rsqrt(step)
        arg2 = step * (self.warm_up_steps ** -1.5)
        return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)
learning_rate = CustomSchedule()
# Adam with the "Attention Is All You Need" hyper-parameters
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9)
# reduction="none": per-token losses are masked and reduced in loss_function
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction="none")
# loss function
def loss_function(real, pred):
    """Masked sparse-categorical cross-entropy for one decoder step.

    Positions where `real` == 0 (the <pad> index) contribute zero loss.
    NOTE(review): tf.reduce_mean divides by the full element count,
    padded positions included, so short sequences deflate the average —
    confirm this matches the intended objective before changing it.
    """
    mask = tf.math.logical_not(tf.math.equal(real, 0))
    loss_ = loss_object(real, pred)
    mask = tf.cast(mask, dtype=loss_.dtype)
    loss_ *= mask
    return tf.reduce_mean(loss_)
# loss & accuracy for training
train_loss = tf.keras.metrics.Mean(name="train_loss")
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name="train_accuracy")
# loss & accuracy shared by the validation and testing phases
test_loss = tf.keras.metrics.Mean(name="test_loss")
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name="test_accuracy")
# training function
def train_step(inputs, targets):
    """Run one teacher-forced training step over a single batch.

    Encodes `inputs`, then decodes position by position while feeding the
    ground-truth token as the next decoder input (teacher forcing).
    Updates the train_loss/train_accuracy metrics and applies one
    optimizer step over encoder + decoder variables.
    """
    loss = 0
    enc_output = None
    with tf.GradientTape() as tape:
        if pre_trained_word2vec:
            # replace index sequences with pre-trained embedding vectors
            inputs = [indices2vec(indices, input_tokenizer) for indices in inputs]
        if decoder_attention:
            enc_output, enc_hidden = encoder(inputs)
        else:
            enc_hidden = encoder(inputs)
        dec_hidden = enc_hidden
        # initial decoder input is a batch of <start> tokens
        if pre_trained_word2vec:
            dec_input = tf.expand_dims(tf.expand_dims(token_vector["<start>"], 0), 0)
            dec_input = tf.tile(dec_input, [targets.shape[0], 1, 1])
        else:
            dec_input = tf.expand_dims([target_tokenizer.word_index["<start>"]] * targets.shape[0], 1)
        # teacher forcing - feeding the target as the next input
        for t in range(1, targets.shape[1]):
            # passing enc_output to the decoder
            if decoder_attention:
                predictions, dec_hidden, _ = decoder(dec_input, state=dec_hidden, encoder_output=enc_output)
            else:
                predictions, dec_hidden = decoder(dec_input, state=dec_hidden)
            loss += loss_function(targets[:, t], predictions)
            train_accuracy.update_state(targets[:, t], predictions)
            # using teacher forcing
            if pre_trained_word2vec:
                dec_input = tf.expand_dims(indices2vec(targets[:, t], target_tokenizer), 1)
            else:
                dec_input = tf.expand_dims(targets[:, t], 1)
    # record the per-timestep average loss for this batch
    train_loss((loss / int(targets.shape[1])))
    variables = encoder.trainable_variables + decoder.trainable_variables
    gradients = tape.gradient(loss, variables)
    optimizer.apply_gradients(zip(gradients, variables))
# validation & testing function
def test_step(inputs, targets):
    """Greedy-decode one batch and return the predicted label sequences.

    Unlike train_step, the decoder is fed its own argmax prediction at
    each position (no teacher forcing). Updates test_loss/test_accuracy.
    Returns one list of predicted indices per batch element, truncated at
    the first <pad> (0) or <end> (2) token.
    """
    loss = 0
    enc_output = None
    if pre_trained_word2vec:
        inputs = [indices2vec(indices, input_tokenizer) for indices in inputs]
    if decoder_attention:
        enc_output, enc_hidden = encoder(inputs)
    else:
        enc_hidden = encoder(inputs)
    dec_hidden = enc_hidden
    predicted_labels = []
    # initial decoder input is a batch of <start> tokens
    if pre_trained_word2vec:
        dec_input = tf.expand_dims(tf.expand_dims(token_vector["<start>"], 0), 0)
        dec_input = tf.tile(dec_input, [targets.shape[0], 1, 1])
    else:
        dec_input = tf.expand_dims([target_tokenizer.word_index["<start>"]] * targets.shape[0], 1)
    for t in range(1, max_length_target):
        # passing enc_output to the decoder
        if decoder_attention:
            predictions, dec_hidden, _ = decoder(dec_input, state=dec_hidden, encoder_output=enc_output)
        else:
            predictions, dec_hidden = decoder(dec_input, state=dec_hidden)
        loss += loss_function(targets[:, t], predictions)
        test_accuracy.update_state(targets[:, t], predictions)
        # greedy decoding: feed back the most likely token
        predicted = tf.math.argmax(predictions, axis=1)
        predicted_labels.append(list(predicted.numpy()))
        if pre_trained_word2vec:
            dec_input = tf.expand_dims(indices2vec(predicted, target_tokenizer), 1)
        else:
            dec_input = tf.expand_dims(predicted, 1)
    test_loss((loss / int(targets.shape[1])))
    # result for evaluation
    result = []
    # rotate the predicted matrix from time-major to batch-major
    for label in zip(*predicted_labels):
        result.append([])
        for value in label:
            # stop at <pad> (0) or <end> (2)
            if value in (0, 2):
                break
            result[-1].append(value)
    return result
# split input and remove <start> & <end> tokens
def word_split(sent):
    """Tokenize every reference label of *sent*, dropping <start>/<end>."""
    split_labels = []
    for label in reference_dict[sent]:
        split_labels.append(label.split()[1:-1])
    return split_labels
# sum the rouge score
def rouge_sum_score(rouge_dict):
    """Sum every f/p/r value across all ROUGE methods in *rouge_dict*."""
    total = 0
    for fpr in rouge_dict.values():
        total += sum(fpr.values())
    return total
# format the rouge dictionary for output
def rouge_dict_format(rouge_dict):
    """Render the ROUGE-1 and ROUGE-L f/p/r values as a display string."""
    r1 = rouge_dict["rouge-1"]
    rl = rouge_dict["rouge-l"]
    template = ("{rouge-1: {f: %f, p: %f, r: %f}, "
                "rouge-l: {f: %f, p: %f, r: %f}}")
    return template % (r1["f"], r1["p"], r1["r"], rl["f"], rl["p"], rl["r"])
# BLEU-1, GLEU-1, NIST-1, ROUGE-1 & ROUGE-L evaluation metrics
def evaluation_metrics(dataset, steps, size):
    """Compute BLEU-1, GLEU-1, NIST-1 and averaged ROUGE over *dataset*.

    Runs `test_step` on *steps* batches, converts predictions into
    hypothesis token lists and collects all reference labels per input
    topic. Each hypothesis is ROUGE-scored against its best-matching
    reference; the totals are averaged over *size* samples. Prints the
    scores and returns (bleu, gleu, nist, rouge_dict).
    """
    references = []
    hypotheses = []
    rouge = Rouge()
    rouge_dict = {"rouge-1": {"f": 0.0, "p": 0.0, "r": 0.0},
                  "rouge-2": {"f": 0.0, "p": 0.0, "r": 0.0},
                  "rouge-l": {"f": 0.0, "p": 0.0, "r": 0.0}}
    # make references & hypotheses lists
    for inputs, targets in dataset.take(steps):
        for labels in target_tokenizer.sequences_to_texts(test_step(inputs, targets)):
            if len(labels) > 0:
                hypotheses.append(labels.split())
            else:
                # keep both lists index-aligned when nothing was predicted
                hypotheses.append([""])
        for labels in input_tokenizer.sequences_to_texts(inputs.numpy()):
            references.append(word_split(labels))
    for index, hypothesis in enumerate(hypotheses):
        max_score = {"rouge-1": {"f": 0.0, "p": 0.0, "r": 0.0},
                     "rouge-2": {"f": 0.0, "p": 0.0, "r": 0.0},
                     "rouge-l": {"f": 0.0, "p": 0.0, "r": 0.0}}
        # one hypothesis may have several references
        for reference in references[index]:
            try:
                rouge_score = rouge.get_scores(" ".join(hypothesis), " ".join(reference))[0]
                # keep the best score
                if rouge_sum_score(rouge_score) > rouge_sum_score(max_score):
                    max_score = rouge_score
            except ValueError:
                # rouge raises on empty hypothesis/reference; score stays 0
                pass
        for method_key in rouge_dict:
            # fpr for traversing f1 precision recall
            for fpr in rouge_dict[method_key]:
                rouge_dict[method_key][fpr] += max_score[method_key][fpr]
    # average
    for method_key in rouge_dict:
        for fpr in rouge_dict[method_key]:
            rouge_dict[method_key][fpr] /= size
    bleu = bleu_score.corpus_bleu(references, hypotheses, weights=(1,))
    gleu = gleu_score.corpus_gleu(references, hypotheses, max_len=1)
    nist = nist_score.corpus_nist(references, hypotheses, n=1)
    print("BLEU-1 Score: %.4f" % bleu)
    print("GLEU-1 Score: %.4f" % gleu)
    print("NIST-1 Score: %.4f" % nist)
    print("ROUGE Scores: %s" % rouge_dict_format(rouge_dict))
    return bleu, gleu, nist, rouge_dict
# plot a single subplot
def single_plot(ax, epochs, data, title):
    """Plot *data* against *epochs* on axes *ax* with an epoch x-label."""
    ax.plot(epochs, data)
    ax.set_xlabel("Epoch")
    ax.set_title(title)
# plot loss, accuracy, BLEU-1, GLEU-1, NIST-1, ROUGE-1, ROUGE-L result
def plot_result(t_l, t_acc, v_l, v_acc, bleu, gleu, nist, rouge_1l):
epochs = list(range(1, len(t_l) + 1))
plt.figure(figsize=(16, 16))
ax1 = plt.subplot2grid((4, 6), (0, 0), colspan=3)
ax1.plot(epochs, t_l, label="Train Loss")
ax1.plot(epochs, v_l, label="Valid Loss")
ax1.legend()
ax1.set_ylim([0, 3])
ax1.set_xlabel("Epoch")
ax1.set_title("Loss")
ax2 = plt.subplot2grid((4, 6), (0, 3), colspan=3)
ax2.plot(epochs, t_acc, label="Train Accuracy")
ax2.plot(epochs, v_acc, label="Valid Accuracy")
ax2.legend()
ax2.set_ylim([0, 1])
ax2.set_xlabel("Epoch")
ax2.set_title("Accuracy")
rouge_1_f = [rouge_dict["rouge-1"]["f"] for rouge_dict in rouge_1l]
rouge_1_p = [rouge_dict["rouge-1"]["p"] for rouge_dict in rouge_1l]
rouge_1_r = [rouge_dict["rouge-1"]["r"] for rouge_dict | |
<reponame>Greco412/discordbot
import asyncio
import atexit
import io
import json
import random
import sqlite3
import time
from json import JSONDecodeError
from xml.dom import minidom
import math
import discord
import requests
from buttplug.client import (ButtplugClient, ButtplugClientConnectorError,
ButtplugClientDevice,
ButtplugClientWebsocketConnector)
from PIL import Image, ImageDraw, ImageFont
import commands
import permissions
# Discord client handle; assigned by the bot bootstrap elsewhere in the project.
client = None
def getResponse(endpoint, tags, limit=20):
    """GET *endpoint* formatted with *limit* and the '+'-joined *tags*.

    Returns the raw requests Response object.
    Fix: the previous manual concatenation (`t = tags[0]; ...`) raised
    IndexError on an empty tag list; str.join handles that case cleanly.
    """
    headers = {"user-agent": "[^_^]/1.0"}
    session = requests.Session()
    session.headers.update(headers)
    return session.get(endpoint.format(limit, "+".join(tags)))
def getData(endpoint, tags, limit=20):
    """Fetch *endpoint* and decode the response body as JSON."""
    return getResponse(endpoint, tags, limit=limit).json()
def getDOM(endpoint, tags, limit=20):
    """Fetch *endpoint* and parse the response body as an XML DOM."""
    return minidom.parseString(getResponse(endpoint, tags, limit=limit).text)
def downloadImage(url):
    """Download *url*; return (buffered binary reader, file extension)."""
    session = requests.Session()
    session.headers.update({"user-agent": "[^_^]/1.0"})
    file_response = session.get(url)
    # NOTE(review): assumes the URL ends in a bare extension (no query
    # string after it) — confirm for every booru endpoint used.
    file_extension = url[url.rfind(".") + 1:]
    # https://stackoverflow.com/a/39217788
    buffer = io.BufferedReader(io.BytesIO(file_response.content))
    return buffer, file_extension
def gelbooru(tags, return_tags=False):
    """Fetch a random gelbooru post matching *tags*.

    Returns downloadImage()'s (buffer, extension) pair, or
    ((buffer, extension), tags_string) when *return_tags* is True.
    """
    # Fix: copy before extending so the caller's list is not mutated.
    tags = tags + ["-loli", "-shota"]
    j = getData("http://gelbooru.com/index.php?page=dapi&limit={0}&s=post&&q=index&json=1&tags={1}", tags)
    # random.choice raises IndexError on an empty result set, which
    # postRandom() reports as "no search results" (randint raised
    # ValueError, falling through to the generic error message).
    post = random.choice(j)
    if return_tags:
        return (downloadImage(post['file_url']), post['tags'])
    return downloadImage(post['file_url'])
#tags should be a list of desired tags
def e621(tags, return_tags=False):
    """Fetch a random e621 post matching *tags*.

    Returns downloadImage()'s (buffer, extension) pair — or None when the
    chosen post has no file URL — or ((buffer, extension), tag_list) when
    *return_tags* is True.
    """
    posts = getData("http://e621.net/posts.json?limit={0}&tags={1}", tags)['posts']
    # single sampling point instead of the duplicated randint in each branch;
    # random.choice on an empty list raises IndexError, which postRandom()
    # reports as "no search results"
    post = random.choice(posts)
    target = post['file']['url']
    if return_tags:
        # flatten e621's categorized tag groups into a single list
        categories = ['general', 'species', 'artist', 'character', 'copyright', 'lore', 'meta']
        tag_list = [tag for category in categories for tag in post['tags'][category]]
        return (downloadImage(target), tag_list)
    if target is None:
        print(post)
        return None
    return downloadImage(target)
def rule34(tags, return_tags=False):
    """Fetch a random rule34 post matching *tags* via the XML API."""
    dom = getDOM("https://rule34.xxx/index.php?page=dapi&s=post&q=index&limit={0}&tags={1}", tags)
    posts = dom.getElementsByTagName("post")
    post = posts[random.randint(0, len(posts) - 1)]
    target = post.attributes['file_url'].value
    if not return_tags:
        return downloadImage(target)
    # NOTE(review): the [1:-1] slice drops the first and last split entries,
    # apparently trimming empties from surrounding whitespace — confirm.
    tag_list = post.attributes['tags'].value.split(" ")[1:-1]
    return (downloadImage(target), tag_list)
# dispatch table: booru name -> fetcher function
functionMap = {"e621":e621, "gelbooru":gelbooru, "rule34":rule34}
# printable list of supported booru names, used in help/error messages
SUPPORTED = str(list(functionMap.keys()))
async def postRandom(channel, booru, tags):
    """Post a random image from *booru* matching *tags* to *channel*.

    Translates the scraper failure modes into user-facing error messages.
    """
    try:
        data, extension = functionMap[booru](tags)
        async with channel.typing():
            await channel.send(file=discord.File(data, filename="fur." + extension))
        data.close()
    except IndexError:
        # empty result set from the booru
        await channel.send("Oopsie woopsie Uwu. " + booru + " returned no search results.")
    except KeyError:
        # unknown booru name (functionMap lookup failed)
        await channel.send("Oopsie woopsie. " + booru + " is not supported.\nSupported boorus: " + SUPPORTED)
    except JSONDecodeError:
        await channel.send("Oopsie Woopsie. Failed to decode json. " + booru + " returned an empty response, or something weird")
    except Exception as e:
        await channel.send("Oopsie woopsie Uwu. One of many possible disasters has occured. Try `!booru help`\nException: " + type(e).__name__)
        print(e)  # surface the original error in the bot's console log
@commands.registerEventHandler(name="booru", exclusivity="global")
async def booru(triggerMessage):
    """Handle `!booru <site> <tag0> ...`: post a random matching image.

    Forces rating:safe in non-NSFW text channels, otherwise appends a
    small blacklist; at most six user-supplied tags are forwarded.
    """
    global SUPPORTED
    tokens = triggerMessage.content.split()
    #if (len(tokens) == 1 or tokens[1].lower() == "help"):
    # NOTE(review): requiring at least one tag also turns a bare
    # `!booru sitename` into the help message — confirm that is intended.
    if (len(tokens) <= 2):
        await triggerMessage.channel.send( "Syntax is `!booru booru_name tag0 ...`\nCurrently supported boorus: " + SUPPORTED)
        return
    if (triggerMessage.channel.type is discord.ChannelType.text and not triggerMessage.channel.is_nsfw()):
        tokens.append("rating:safe")
    else:
        tokens.extend(["-young", "-scat","-fart"]) #Anti trash
    await postRandom(triggerMessage.channel, tokens[1], tokens[2:8]) # chop off extra tags
    # TODO: Filter remaining blacklist tags from results
    return
@commands.registerEventHandler(name="unbusy")
async def unbusy(triggerMessage):
    """Clear the module-level `busy` flag.

    NOTE(review): `busy` is not defined in this module's visible code —
    presumably set elsewhere; verify it exists before this handler fires.
    """
    global busy
    busy = False
@commands.registerEventHandler(name="secret", exclusivity="global")
async def postsecret(triggerMessage):
    """Post an e621 image with a faint random password drawn onto it."""
    data_in = e621(["anthro"])
    # Fix: addsecret() returns None when the image is too small; the old
    # `data, _ = addsecret(...)` raised TypeError unpacking that None.
    result = addsecret(data_in[0]) if data_in is not None else None
    if result is not None:
        data, _ = result
        await triggerMessage.channel.send(file=discord.File(data, filename="secret.png"))
        data.close()
    else:
        await triggerMessage.channel.send("Failed to generate image")
def addsecret(data_in):
    """Render a random 6-character password faintly onto the image in *data_in*.

    Returns (buffered PNG reader, password), or None when the rotated text
    does not fit inside the image.
    NOTE(review): callers unpack the return value directly, so the None
    path raises TypeError upstream — confirm and handle at call sites.
    """
    # alphabet omits some ambiguous lowercase letters (i, j, l)
    chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghkmnopqrstuvwxyz1234567890"
    pw = ""
    for i in range(6):
        pw += random.choice(chars)
    #get an image from e621
    #we can mess with tags later
    bg = Image.open(data_in)
    #make sure the image has an alpha channel so alpha_composite will work later
    bg = bg.convert(mode="RGBA")
    bg.putalpha(255)
    # magic formula: size = x/y/z
    # where x is the x dimension of the image
    # y is the ratio of image width to text width
    # and z is the ratio of pixels to points
    # this finds the font size to produce text of the correct pixel width
    fontsize = int(bg.size[0]/8/5.3)
    fontsize = (fontsize, 12)[fontsize < 12]  # clamp to a 12pt minimum
    # font can be changed later
    font = ImageFont.truetype("arial.ttf", fontsize)
    #get the dimensions of rendered text
    # NOTE(review): ImageFont.getsize was removed in Pillow 10; newer
    # Pillow needs getbbox()/getlength() here.
    x,y = font.getsize(pw)
    img = Image.new("RGBA", (x+6, y+6), (0,0,0,0))
    draw = ImageDraw.Draw(img)
    #draw the text on a canvas, rotate, then get new dimensions
    # four dark offset copies form an outline beneath the light main text
    draw.text((0, 1), pw, font=font, fill=(0, 0, 0, 127))
    draw.text((2, 1), pw, font=font, fill=(0, 0, 0, 127))
    draw.text((1, 0), pw, font=font, fill=(0, 0, 0, 127))
    draw.text((1, 2), pw, font=font, fill=(0, 0, 0, 127))
    draw.text((1,1), pw, fill=(255, 255, 255, 127), font=font)
    img = img.rotate(random.randrange(0,360), resample=Image.NEAREST, expand=1)
    #randomly pad the text image to align the text with a random location on the background image
    x,y = img.size
    xb,yb = bg.size
    if xb-x <= 0 or yb-y <= 0:
        print("Too small image {} by {}, text size {} by {}, {} points".format(xb, yb, x, y, fontsize))
        return None
    x1 = random.randrange(0, xb-x)
    x2 = xb-x1
    y1 = random.randrange(0, yb-y)
    y2 = yb-y1
    img = img.crop((-x1, -y1, x2+x, y2+y))
    #composite images and save
    bg.alpha_composite(img)
    output = io.BytesIO()
    bg.save(output, format="PNG")
    output.seek(0)
    return (io.BufferedReader(output), pw)
@commands.registerEventHandler(name="doge", exclusivity="global")
async def doge(triggerMessage):
    """Post a booru image overlaid with doge-meme phrases built from its tags."""
    tokens = triggerMessage.content.split()
    site = tokens[1] if len(tokens) > 1 else "e621"
    if len(tokens) > 2:
        tags = tokens[2:8]  # cap the number of user-supplied tags
    else:
        tags = ["dog", "rating:explicit"]
    tags.extend(["-young", "-scat","-fart"]) #Anti trash
    image, tag_list = functionMap[site](tags, return_tags=True)
    data = makedoge(image[0], tag_list)
    if data is None:
        await triggerMessage.channel.send("Failed to generate image")
    else:
        await triggerMessage.channel.send(file=discord.File(data, filename="doge.png"))
        data.close()
def makedoge(data, tags):
    """Overlay doge-meme phrases built from *tags* onto the image in *data*.

    *data* is a file-like object containing the image; *tags* is either the
    raw space-separated tag string from the booru json or an already-split
    list. Returns a buffered reader over the rendered PNG.
    """
    colors = ["Red", "Green", "GreenYellow", "Magenta", "Cyan", "Blue", "White", "Black", "Orange", "Yellow", "Grey"]
    colors = random.sample(colors, 8)
    # accept either the raw json tag string or an already split-up list
    if isinstance(tags, str):
        tags = tags.split(" ")
    # Fix: random.sample(tags, 5) raised ValueError when fewer than 5 tags
    # were available; fall back to sampling with replacement (and a
    # placeholder when there are no tags at all).
    if not tags:
        tags = ["wow"]
    tags = random.sample(tags, 5) if len(tags) >= 5 else random.choices(tags, k=5)
    img = Image.open(data)
    draw = ImageDraw.Draw(img)
    phrases = ["wow."]
    phrases.append("such {}".format(tags[0]))
    phrases.append("much {}".format(tags[1]))
    phrases.append("very {}".format(tags[2]))
    phrases.append("so {}".format(tags[3]))
    phrases.append("how {}".format(tags[4]))
    phrases.append("Cool")
    phrases.append("neat")
    random.shuffle(phrases)
    # candidate anchors spread over the image (x nudged off the left edge)
    xs = [int(img.size[0]*(i/10))+(i==0)*10 for i in range(0,9)] # fun list iteration
    ys = [int(img.size[1]*(i/9)) for i in range(0,9)]
    random.shuffle(xs)
    # font size scales with the larger image dimension
    # (max(img.size) is equivalent to the old (w, h)[w < h] trick)
    font = ImageFont.truetype(font="comic.ttf", size=int(max(img.size)/6/5.3))
    for i in range(len(phrases)):
        draw.text((xs[i],ys[i]), phrases[i], fill=colors[i], font=font)
    output = io.BytesIO()
    img.save(output, format="PNG")
    output.seek(0)
    return io.BufferedReader(output)
# per-user buttplug.io connections: {discord user id: ButtplugClient}
buttplugClients = {}
@commands.registerEventHandler(name="keister", exclusivity="global")
async def keister(triggerMessage):
    """Connect the author's buttplug.io server (URL as first arg) and scan."""
    global buttplugClients
    if triggerMessage.author.id in buttplugClients:
        # Fix: stop_scanning()/disconnect() are coroutines (unkeister below
        # awaits the same methods); the old code dropped the coroutine
        # objects without awaiting, so the prior connection never shut down.
        await buttplugClients[triggerMessage.author.id].stop_scanning()
        await buttplugClients[triggerMessage.author.id].disconnect()
    buttplugClients[triggerMessage.author.id] = ButtplugClient(triggerMessage.author.name)
    connector = ButtplugClientWebsocketConnector(triggerMessage.content.split()[1])
    await buttplugClients[triggerMessage.author.id].connect(connector)
    await buttplugClients[triggerMessage.author.id].start_scanning()
    await triggerMessage.channel.send("Keistered!")
@commands.registerEventHandler(name="unkeister", exclusivity="global")
async def unkeister(triggerMessage):
    """Stop and disconnect the author's buttplug.io devices, if connected."""
    global buttplugClients
    author_id = triggerMessage.author.id
    if author_id in buttplugClients:
        bp_client = buttplugClients[author_id]
        for devid in bp_client.devices:
            await bp_client.devices[devid].send_stop_device_cmd()
        await bp_client.stop_scanning()
        await bp_client.disconnect()
        buttplugClients.pop(author_id)
    await triggerMessage.channel.send("Unkeistered!")
class BooruGame:
    """State for one channel's tag-guessing game."""

    def __init__(self, tags, url):
        self.userScores = {}        # user -> list of tags they guessed correctly
        self.tags = tags            # tags still left to be guessed
        self.previousGuesses = []   # correct guesses so far, in order
        self.timeRemaining = 30 + 2 * len(tags)
        self.url = url              # image the tags belong to

    def wasguessed(self, guess):
        """Return True if *guess* was already guessed correctly."""
        return guess in self.previousGuesses

    def guess(self, guess, user):
        """Score *guess* for *user* and return a feedback message."""
        guess = guess.replace("`", "").casefold()
        scores = self.userScores.setdefault(user, [])
        if guess in self.previousGuesses:
            self.timeRemaining -= 1  # penalise repeated guesses
            return "`" + guess + "` was already guessed."
        if guess not in self.tags:
            return "`" + guess + "`: Nope!"
        scores.append(guess)
        self.previousGuesses.append(guess)
        self.tags.remove(guess)
        self.timeRemaining += 5  # reward a correct guess with extra time
        return "`" + guess + "`: Correct! " + str(1) + " points. " + str(len(self.tags)) + " tags left."
# active games, one per channel: {discord channel: BooruGame}
gameInstances = {}
gameHistoryDB = None
@atexit.register
def exit():
    # close the results database on interpreter shutdown
    # NOTE(review): shadows the `exit` builtin; only atexit calls it here
    gameHistoryDB.close()
# results database; one row is appended per finished game in endGame()
gameHistoryDB = sqlite3.connect("boorugame.db")
gameHistoryDBCursor= gameHistoryDB.cursor()
gameHistoryDBCursor.execute("create table if not exists Games (time integer, image text, channel text, remainingTags text, guessedTags text, results text, winner text, playercount integer)")
gameHistoryDB.commit()
def nameFromId(channel, id):
    """Resolve a member id to a display name (nickname if set, else username)."""
    member = channel.guild.get_member(id)
    if member.nick is None:
        return member.name
    return member.nick
async def endGame(channel):
    """Finish the game running in `channel`.

    Announces the unguessed/guessed tags and the per-user scores, declares
    a winner when anyone scored, and records the round in the history DB.
    """
    global gameInstances
    game = gameInstances[channel]
    del gameInstances[channel]
    endMsg = "Game Complete!\n" + "Unguessed tags were: `" + str(game.tags)+"`\n" + "Guessed tags were: `" + str(game.previousGuesses) + "`"
    await channel.send(endMsg)
    scoreDict = game.userScores
    # Sort players by number of correct guesses, best first.
    scores = sorted([(k, len(scoreDict[k])) for k in scoreDict], key=lambda tup: tup[1], reverse=True)
    if len(scores) > 0:
        scoreString = ""
        for id, score in scores:
            name = nameFromId(channel, id)
            scoreString += "User " + str(name) + " scored " + str(score) + "\n"
        name = nameFromId(channel, scores[0][0])
        await channel.send(scoreString + "\n" + str(name) + " wins!")
    remainingJson = json.dumps(game.tags)
    guessedJson = json.dumps(game.previousGuesses)
    resultJson = json.dumps(game.userScores)
    # BUG FIX: when the game times out with no guesses, scores is empty and
    # the previous unconditional scores[0][0] raised IndexError here.
    winner = scores[0][0] if scores else None
    gameData = (time.time(), game.url, channel.id, remainingJson, guessedJson, resultJson, winner, len(scores))
    gameHistoryDBCursor.execute("INSERT INTO Games VALUES (?,?,?,?,?,?,?,?)", gameData)
    gameHistoryDB.commit()
@commands.registerEventHandler(triggerType="\\timeTick", name="boorugametick")
async def updateTime():
    """Tick handler: age every running game and end those that ran out of time."""
    global gameInstances
    expired = []
    for chan, game in gameInstances.items():
        game.timeRemaining -= 1
        if game.timeRemaining <= 0:
            expired.append(chan)
            print("Stopping " + str(chan))
    # End games only after the iteration, since endGame mutates gameInstances.
    for chan in expired:
        await chan.send("Timed out!")
        await endGame(chan)
def lookup_tag(tag):
global headers
session = requests.Session()
session.headers.update(headers)
response = session.get("http://e621.net/tag/index.json?name=" + tag)
j = response.json()
if (len(j) == 0):
return 0
else:
return | |
appropriate file extension (same as format) will be added.
(2) File extension does not match format specifier.
The file extension will be replaced by the one specified in format.
"""
file_basename, file_extension = os.path.splitext(self.filename)
if "format" in self.parameters:
if file_extension != self.parameters["format"]:
self.filename = '.'.join([file_basename,
self.parameters["format"]])
elif not file_extension:
self.filename = '.'.join([self.filename,
self.parameters["format"]])
class Caption(aspecd.utils.Properties):
    """
    Caption of a figure, consisting of title, text, and parameters.

    Attributes
    ----------
    title : :class:`str`
        Single sentence stating the figure's intent.

        Conventionally rendered in bold face in the figure caption.

    text : :class:`str`
        Body of the caption, directly following the title.

        Ideally self-contained, so the figure can be understood without
        reading the surrounding text.

    parameters : :class:`list`
        Names of parameters to include in the caption.

        These usually appear at the very end of the caption.

    """

    def __init__(self):
        super().__init__()
        self.title = ''
        self.text = ''
        self.parameters = []
class PlotProperties(aspecd.utils.Properties):
    """
    Appearance settings of a plot.

    Attributes
    ----------
    figure : :class:`aspecd.plotting.FigureProperties`
        Settings concerning the figure itself.

        See :class:`aspecd.plotting.FigureProperties` for the properties
        that can be set this way.

    legend : :class:`aspecd.plotting.LegendProperties`
        Settings concerning the legend.

        See :class:`aspecd.plotting.LegendProperties` for the properties
        that can be set this way.

    zero_lines : :class:`aspecd.plotting.LineProperties`
        Settings concerning the zero lines.

        See :class:`aspecd.plotting.LineProperties` for the properties
        that can be set this way.

        The zero-line colour defaults to ``#cccccc``.

    Raises
    ------
    aspecd.exceptions.MissingPlotterError
        Raised if no plotter is provided.

    """

    def __init__(self):
        super().__init__()
        self.figure = FigureProperties()
        self.legend = LegendProperties()
        self.zero_lines = LineProperties()
        # A light grey keeps the zero lines visually unobtrusive by default.
        self.zero_lines.color = '#cccccc'

    def apply(self, plotter=None):
        """
        Apply the stored properties to a plot.

        Only figure properties exist at this level, hence only those get
        applied; derived classes are responsible for applying every
        additional property they introduce.

        Parameters
        ----------
        plotter : :class:`aspecd.plotting.Plotter`
            Plotter whose plot receives the properties.

        Raises
        ------
        aspecd.exceptions.MissingPlotterError
            Raised if no plotter is provided.

        """
        if plotter:
            self.figure.apply(figure=plotter.figure)
        else:
            raise aspecd.exceptions.MissingPlotterError
class SinglePlotProperties(PlotProperties):
    """
    Appearance settings of a single plot.

    Attributes
    ----------
    axes : :class:`aspecd.plotting.AxesProperties`
        Settings concerning the axes.

        See :class:`aspecd.plotting.AxesProperties` for the properties
        that can be set this way.

    grid : :class:`aspecd.plotting.GridProperties`
        Settings concerning the grid.

        See :class:`aspecd.plotting.GridProperties` for the properties
        that can be set this way.

    drawing : :class:`aspecd.plotting.DrawingProperties`
        Settings concerning the line within the plot.

        See :class:`aspecd.plotting.DrawingProperties` for the properties
        that can be set this way.

    Raises
    ------
    aspecd.exceptions.MissingPlotterError
        Raised if no plotter is provided.

    """

    def __init__(self):
        super().__init__()
        self.axes = AxesProperties()
        self.grid = GridProperties()
        self.drawing = DrawingProperties()

    def apply(self, plotter=None):
        """
        Apply the stored properties to a plot.

        Parameters
        ----------
        plotter : :class:`aspecd.plotting.SinglePlotter`
            Plotter whose plot receives the properties.

        Raises
        ------
        aspecd.exceptions.MissingPlotterError
            Raised if no plotter is provided.

        """
        super().apply(plotter=plotter)
        target_axes = plotter.axes
        self.axes.apply(axes=target_axes)
        self.grid.apply(axes=target_axes)
        # A plotter may not (yet) have an actual drawing to style.
        if plotter.drawing:
            self.drawing.apply(drawing=plotter.drawing)
class SinglePlot1DProperties(SinglePlotProperties):
    """
    Appearance settings of a 1D single plot.

    Attributes
    ----------
    drawing : :class:`aspecd.plotting.LineProperties`
        Settings concerning the plotted line.

        See :class:`aspecd.plotting.LineProperties` for the properties
        that can be set this way.

    Raises
    ------
    aspecd.exceptions.MissingPlotterError
        Raised if no plotter is provided.

    """

    def __init__(self):
        super().__init__()
        # A 1D plot draws a line, hence line properties for the drawing.
        self.drawing = LineProperties()
class SinglePlot2DProperties(SinglePlotProperties):
    """
    Appearance settings of a 2D single plot.

    Attributes
    ----------
    drawing : :class:`aspecd.plotting.SurfaceProperties`
        Settings concerning the plotted surface.

        See :class:`aspecd.plotting.SurfaceProperties` for the properties
        that can be set this way.

    Raises
    ------
    aspecd.exceptions.MissingPlotterError
        Raised if no plotter is provided.

    """

    def __init__(self):
        super().__init__()
        # A 2D plot draws a surface, hence surface properties for the drawing.
        self.drawing = SurfaceProperties()
class MultiPlotProperties(PlotProperties):
    """
    Properties of a multiplot, defining its appearance.

    Attributes
    ----------
    axes : :class:`aspecd.plotting.AxesProperties`
        Properties of the axes.

        For the properties that can be set this way, see the documentation
        of the :class:`aspecd.plotting.AxesProperties` class.

    grid : :class:`aspecd.plotting.GridProperties`
        Properties of the grid.

        For the properties that can be set this way, see the documentation
        of the :class:`aspecd.plotting.GridProperties` class.

    drawings : :class:`list`
        Properties of the lines within a plot.

        Each element is a :obj:`aspecd.plotting.DrawingProperties` object.
        For the properties that can be set this way, see the documentation
        of the :class:`aspecd.plotting.DrawingProperties` class.

    Raises
    ------
    aspecd.exceptions.MissingPlotterError
        Raised if no plotter is provided.

    """

    def __init__(self):
        super().__init__()
        self.axes = AxesProperties()
        self.grid = GridProperties()
        self.drawings = []

    def from_dict(self, dict_=None):
        """
        Set attributes from dictionary.

        The key ``drawings`` is handled in a special way: as
        :attr:`aspecd.plotting.MultiPlotProperties.drawings` is a list,
        the entries of the list are iterated over, and a new element is
        appended only if it does not exist already.

        As different MultiPlotter objects will use different properties
        classes for their drawing, adding a new drawing is delegated to
        :meth:`aspecd.plotting.MultiPlotProperties.add_drawing`, which
        each MultiPlotter class can use as well to add drawing properties
        for each plotted item.

        Parameters
        ----------
        dict_ : :class:`dict`
            Dictionary containing information of a task.

        Raises
        ------
        aspecd.exceptions.MissingDictError
            Raised if no dict is provided.

        """
        if dict_ is None:
            # BUG FIX: previously ``'drawings' in None`` raised a bare
            # TypeError; raise the documented exception instead.
            raise aspecd.exceptions.MissingDictError
        if 'drawings' in dict_:
            # Grow the drawings list as needed before filling the entries,
            # so existing entries are reused rather than replaced.
            for _ in range(len(self.drawings), len(dict_['drawings'])):
                self.add_drawing()
            for idx, drawing in enumerate(dict_['drawings']):
                self.drawings[idx].from_dict(drawing)
            # NOTE: mutates the caller's dict (pre-existing behavior).
            dict_.pop('drawings')
        if dict_:
            super().from_dict(dict_)

    def add_drawing(self):
        """
        Add a :obj:`aspecd.plotting.DrawingProperties` object to the list.

        As different MultiPlotter objects will use different properties
        classes for their drawing, adding a new drawing is handled by this
        method. Additionally, each MultiPlotter class can use this method
        as well, to add drawing properties for each plotted item.

        .. note::
            A note for developers: Concrete MultiPlotter classes will use
            classes derived from :class:`aspecd.plotting.MultiPlotProperties`
            for their ``properties`` property. These properties classes
            should override this method to ensure the correct type of
            :class:`aspecd.plotting.DrawingProperties` is instantiated.
            Furthermore, make sure to set default values according to the
            current cycler.

        """
        drawing_properties = DrawingProperties()
        self.drawings.append(drawing_properties)

    def apply(self, plotter=None):
        """
        Apply properties to plot.

        Parameters
        ----------
        plotter : :class:`aspecd.plotting.MultiPlotter`
            Plotter the properties should be applied to.

        Raises
        ------
        aspecd.exceptions.MissingPlotterError
            Raised if no plotter is provided.

        """
        super().apply(plotter=plotter)
        self.axes.apply(axes=plotter.axes)
        self.grid.apply(axes=plotter.axes)
        if hasattr(plotter, 'legend') and plotter.legend:
            self.legend.apply(legend=plotter.legend)
        if hasattr(plotter, 'drawings'):
            for idx, drawing in enumerate(plotter.drawings):
                self.drawings[idx].apply(drawing=drawing)
class MultiPlot1DProperties(MultiPlotProperties):
    """
    Appearance settings of a 1D multiplot.

    Attributes
    ----------
    drawings : :class:`list`
        Settings for each of the lines within the plot.

        Every element is an :obj:`aspecd.plotting.LineProperties` object;
        see :class:`aspecd.plotting.LineProperties` for the properties
        that can be set this way.

    Raises
    ------
    aspecd.exceptions.MissingPlotterError
        Raised if no plotter is provided.

    """

    def add_drawing(self):
        """
        Append an :obj:`aspecd.plotting.LineProperties` object to the list.

        Sensible defaults -- at least colour, line width, marker, and line
        style -- are read from :obj:`matplotlib.pyplot.rcParams` before the
        object is appended.
        """
        line_properties = LineProperties()
        self._set_default_properties(line_properties)
        self.drawings.append(line_properties)

    def _set_default_properties(self, drawing_properties):
        # Assign the next entry of the matplotlib property cycle,
        # wrapping around once the cycle is exhausted.
        cycle = plt.rcParams['axes.prop_cycle'].by_key()
        position = len(self.drawings) % len(cycle["color"])
        for key, values in cycle.items():
            setattr(drawing_properties, key, values[position])
        # Fall back to the global line defaults for everything the
        # property cycle does not define.
        for key in ('linewidth', 'linestyle', 'marker'):
            rc_key = 'lines.' + key
            if rc_key in plt.rcParams:
                setattr(drawing_properties, key, plt.rcParams[rc_key])
class CompositePlotProperties(PlotProperties):
    """
    Appearance settings of a composite plot.

    Attributes
    ----------
    axes : :class:`aspecd.plotting.AxesProperties`
        Settings applied to all axes of the CompositePlotter.

        Applying these settings overrides the settings of the individual
        plotters. See :class:`aspecd.plotting.AxesProperties` for the
        properties that can be set this way.

    Raises
    ------
    aspecd.exceptions.MissingPlotterError
        Raised if no plotter is provided.

    """

    def __init__(self):
        super().__init__()
        self.axes = AxesProperties()

    def apply(self, plotter=None):
        """
        Apply the stored properties to a plot.

        Parameters
        ----------
        plotter : :class:`aspecd.plotting.CompositePlotter`
            Plotter whose plot receives the properties.

        Raises
        ------
        aspecd.exceptions.MissingPlotterError
            Raised if no plotter is provided.

        """
        super().apply(plotter=plotter)
        if hasattr(plotter, 'axes'):
            # A composite plotter carries a list of axes; style each one.
            for single_axes in plotter.axes:
                self.axes.apply(axes=single_axes)
class FigureProperties(aspecd.utils.Properties):
"""
Properties of a figure of a plot, i.e., the most general aspects.
Basically, the attributes are a subset of what :mod:`matplotlib` defines
for :obj:`matplotlib.figure.Figure` objects.
Attributes
----------
size: :class:`tuple`
Figure dimension (width, height) in inches.
2-tuple of floats
Default: 6.4, 4.8
dpi: :class:`float`
Figure resolution in dots |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.